| source | c |
|---|---|
J1OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include "Utilities/qmc_common.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for one-body Jastrow function using multiple functors
*/
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
///alias FuncType
using FuncType = FT;
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using DistRow = DistanceTableData::DistRow;
using DisplRow = DistanceTableData::DisplRow;
///table index
const int myTableID;
///number of ions
int Nions;
///number of electrons
int Nelec;
///number of groups
int NumGroups;
///reference to the sources (ions)
const ParticleSet& Ions;
valT curAt;
valT curLap;
posT curGrad;
///\f$Vat[i] = \sum_j u_{i,j}\f$
Vector<valT> Vat;
aligned_vector<valT> U, dU, d2U, d3U;
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
Vector<posT> Grad;
Vector<valT> Lap;
///Container of functors, one per ion species: \f$F[g]\f$ with \f$g\f$ the ion group id
std::vector<FT*> F;
J1OrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& els)
: WaveFunctionComponent("J1OrbitalSoA", obj_name), myTableID(els.addTable(ions)), Ions(ions)
{
if (myName.empty())
throw std::runtime_error("J1OrbitalSoA object name cannot be empty!");
initialize(els);
}
J1OrbitalSoA(const J1OrbitalSoA& rhs) = delete;
~J1OrbitalSoA()
{
for (int i = 0; i < F.size(); ++i)
if (F[i] != nullptr)
delete F[i];
}
/* initialize storage */
void initialize(const ParticleSet& els)
{
Nions = Ions.getTotalNum();
NumGroups = Ions.getSpeciesSet().getTotalNum();
F.resize(std::max(NumGroups, 4), nullptr);
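// NumGroups == 0 selects the ungrouped per-ion code paths below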
if (NumGroups > 1 && !Ions.IsGrouped)
{
NumGroups = 0;
}
Nelec = els.getTotalNum();
Vat.resize(Nelec);
Grad.resize(Nelec);
Lap.resize(Nelec);
U.resize(Nions);
dU.resize(Nions);
d2U.resize(Nions);
d3U.resize(Nions);
DistCompressed.resize(Nions);
DistIndice.resize(Nions);
}
void addFunc(int source_type, FT* afunc, int target_type = -1)
{
if (F[source_type] != nullptr)
delete F[source_type];
F[source_type] = afunc;
}
void recompute(const ParticleSet& P)
{
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
computeU3(P, iat, d_ie.getDistRow(iat));
Vat[iat] = simd::accumulate_n(U.data(), Nions, valT());
Lap[iat] = accumulateGL(dU.data(), d2U.data(), d_ie.getDisplRow(iat), Grad[iat]);
}
}
LogValueType evaluateLog(const ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
{
return evaluateGL(P, G, L, true);
}
void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
const DistanceTableData& d_ie(P.getDistTable(myTableID));
valT dudr, d2udr2;
Tensor<valT, DIM> ident;
grad_grad_psi = 0.0;
ident.diagonal(1.0);
for (int iel = 0; iel < Nelec; ++iel)
{
const auto& dist = d_ie.getDistRow(iel);
const auto& displ = d_ie.getDisplRow(iel);
for (int iat = 0; iat < Nions; iat++)
{
int gid = Ions.GroupID[iat];
auto* func = F[gid];
if (func != nullptr)
{
RealType r = dist[iat];
RealType rinv = 1.0 / r;
PosType dr = displ[iat];
func->evaluate(r, dudr, d2udr2);
grad_grad_psi[iel] -= rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
}
}
}
}
PsiValueType ratio(ParticleSet& P, int iat)
{
UpdateMode = ORB_PBYP_RATIO;
curAt = computeU(P.getDistTable(myTableID).getTempDists());
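// J1 contributes -sum_i Vat[i] to log(psi) (see evaluateGL), so a single
// particle move gives the ratio exp(Vat[iat] - curAt)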
return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
}
inline void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
for (int k = 0; k < ratios.size(); ++k)
ratios[k] = std::exp(Vat[VP.refPtcl] - computeU(VP.getDistTable(myTableID).getDistRow(k)));
}
inline valT computeU(const DistRow& dist)
{
valT curVat(0);
if (NumGroups > 0)
{
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] != nullptr)
curVat += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
curVat += F[gid]->evaluate(dist[c]);
}
}
return curVat;
}
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
const auto& dist = P.getDistTable(myTableID).getTempDists();
curAt = valT(0);
if (NumGroups > 0)
{
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] != nullptr)
curAt += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
curAt += F[gid]->evaluate(dist[c]);
}
}
for (int i = 0; i < Nelec; ++i)
ratios[i] = std::exp(Vat[i] - curAt);
}
inline LogValueType evaluateGL(const ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch = false)
{
if (fromscratch)
recompute(P);
for (size_t iat = 0; iat < Nelec; ++iat)
G[iat] += Grad[iat];
for (size_t iat = 0; iat < Nelec; ++iat)
L[iat] -= Lap[iat];
return LogValue = -simd::accumulate_n(Vat.data(), Nelec, valT());
}
/** compute gradient and lap
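 * For a radial u(r): grad u = (u'/r) * dr and lap u = u'' + (D-1) u'/r.
 * computeU3 stores dU = u'/r, so the Laplacian accumulates
 * d2u[j] + lapfac*du[j] and the gradient accumulates du[j] * displacement.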
* @return lap
*/
inline valT accumulateGL(const valT* restrict du, const valT* restrict d2u, const DisplRow& displ, posT& grad) const
{
valT lap(0);
constexpr valT lapfac = OHMMS_DIM - RealType(1);
//#pragma omp simd reduction(+:lap)
for (int jat = 0; jat < Nions; ++jat)
lap += d2u[jat] + lapfac * du[jat];
for (int idim = 0; idim < OHMMS_DIM; ++idim)
{
const valT* restrict dX = displ.data(idim);
valT s = valT();
//#pragma omp simd reduction(+:s)
for (int jat = 0; jat < Nions; ++jat)
s += du[jat] * dX[jat];
grad[idim] = s;
}
return lap;
}
/** compute U, dU and d2U
* @param P quantum particleset
* @param iat the moving particle
* @param dist starting address of the distances of the ions wrt the iat-th particle
*/
inline void computeU3(const ParticleSet& P, int iat, const DistRow& dist)
{
if (NumGroups > 0)
{ //ions are grouped
constexpr valT czero(0);
std::fill_n(U.data(), Nions, czero);
std::fill_n(dU.data(), Nions, czero);
std::fill_n(d2U.data(), Nions, czero);
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] == nullptr)
continue;
F[jg]->evaluateVGL(-1, Ions.first(jg), Ions.last(jg), dist.data(), U.data(), dU.data(), d2U.data(),
DistCompressed.data(), DistIndice.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
{
U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
dU[c] /= dist[c];
}
}
}
}
/** compute the gradient during particle-by-particle update
* @param P quantum particleset
* @param iat particle index
*/
GradType evalGrad(ParticleSet& P, int iat) { return GradType(Grad[iat]); }
/** compute the gradient during particle-by-particle update
* @param P quantum particleset
* @param iat particle index
*
* Using getTempDists(). curAt, curGrad and curLap are computed.
*/
PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
UpdateMode = ORB_PBYP_PARTIAL;
computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
curAt = simd::accumulate_n(U.data(), Nions, valT());
grad_iat += curGrad;
return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
}
/** Rejected move. Nothing to do */
inline void restore(int iat) {}
/** Accepted move. Update Vat[iat], Grad[iat] and Lap[iat] */
void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false)
{
if (UpdateMode == ORB_PBYP_RATIO)
{
computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
}
LogValue += Vat[iat] - curAt;
Vat[iat] = curAt;
Grad[iat] = curGrad;
Lap[iat] = curLap;
}
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
if (Bytes_in_WFBuffer == 0)
{
Bytes_in_WFBuffer = buf.current();
buf.add(Vat.begin(), Vat.end());
buf.add(Grad.begin(), Grad.end());
buf.add(Lap.begin(), Lap.end());
Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
// free local space
Vat.free();
Grad.free();
Lap.free();
}
else
{
buf.forward(Bytes_in_WFBuffer);
}
}
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
evaluateGL(P, P.G, P.L, false);
buf.forward(Bytes_in_WFBuffer);
return LogValue;
}
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
}
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
{
J1OrbitalSoA<FT>* j1copy = new J1OrbitalSoA<FT>(myName, Ions, tqp);
j1copy->Optimizable = Optimizable;
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
j1copy->addFunc(i, new FT(*F[i]));
}
if (dPsi)
{
j1copy->dPsi = dPsi->makeClone(tqp);
}
return j1copy;
}
/**@{ WaveFunctionComponent virtual functions that are not essential for the development */
void reportStatus(std::ostream& os)
{
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
F[i]->myVars.print(os);
}
}
void checkInVariables(opt_variables_type& active)
{
myVars.clear();
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
{
F[i]->checkInVariables(active);
F[i]->checkInVariables(myVars);
}
}
}
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable = myVars.is_optimizable();
for (size_t i = 0, n = F.size(); i < n; ++i)
if (F[i] != nullptr)
F[i]->checkOutVariables(active);
if (dPsi)
dPsi->checkOutVariables(active);
}
void resetParameters(const opt_variables_type& active)
{
if (!Optimizable)
return;
for (size_t i = 0, n = F.size(); i < n; ++i)
if (F[i] != nullptr)
F[i]->resetParameters(active);
for (int i = 0; i < myVars.size(); ++i)
{
int ii = myVars.Index[i];
if (ii >= 0)
myVars[i] = active[ii];
}
if (dPsi)
dPsi->resetParameters(active);
}
/**@} */
inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
{
GradType g_return(0.0);
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
const auto& dist = d_ie.getDistRow(iat);
const auto& displ = d_ie.getDisplRow(iat);
int gid = source.GroupID[isrc];
RealType r = dist[isrc];
RealType rinv = 1.0 / r;
PosType dr = displ[isrc];
if (F[gid] != nullptr)
{
U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
g_return -= dU[isrc] * rinv * dr;
}
}
return g_return;
}
inline GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int isrc,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
GradType g_return(0.0);
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
const auto& dist = d_ie.getDistRow(iat);
const auto& displ = d_ie.getDisplRow(iat);
int gid = source.GroupID[isrc];
RealType r = dist[isrc];
RealType rinv = 1.0 / r;
PosType dr = displ[isrc];
if (F[gid] != nullptr)
{
U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
}
else
{
APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
}
g_return -= dU[isrc] * rinv * dr;
//The following terms depend only on the radial component r. Thus,
//we compute them and mix with position vectors to acquire the full
//cartesian vector objects.
valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;
for (int idim = 0; idim < OHMMS_DIM; idim++)
{
grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
grad_grad[idim][iat][idim] += rinv * dU[isrc];
lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
}
}
return g_return;
}
};
} // namespace qmcplusplus
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
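/* Example with hypothetical values:
 *   struct timeval t0 = {1, 900000}, t1 = {3, 100000}, dt;
 *   timeval_subtract(&dt, &t1, &t0);  // dt = {1, 200000}, returns 0
 */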
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// default problem size and time steps (placeholder values) so the benchmark
// runs without arguments; command-line arguments override them below
int Nx = 256, Ny = 256, Nz = 256, Nt = 100;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; // second time level starts zeroed (it is read at t=0)
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
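// Note: these appear to be the standard radius-4 (8th-order) central
// difference second-derivative weights -205/72, 8/5, -1/5, 8/315, -1/560
// scaled by 0.1, presumably with the (dt/h)^2 factor folded in.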
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - per grid point: roughly 26 additions and 7 multiplications
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);
free(tile_size);
return 0;
}
|
fill_nr_s8.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cvhf.h"
#include "nr_direct.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env);
/*
 * 8-fold permutation symmetry: shells are generated with i >= j and
 * i >= k >= l; the packed (ij) >= (kl) ordering is enforced in store_ij
 */
static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri,
int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int *atm = envs->atm;
const int *bas = envs->bas;
const double *env = envs->env;
const int natm = envs->natm;
const int nbas = envs->nbas;
const int *ao_loc = envs->ao_loc;
const int *shls_slice = envs->shls_slice;
const CINTOpt *cintopt = envs->cintopt;
const int nao = ao_loc[nbas];
const size_t nao2 = nao * nao;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
double *cache = eri + di * dj * nao2;
int dims[4] = {nao, nao, dj, di};
int ksh, lsh, dk, dl, ij, k, l;
int shls[4];
double *peri;
shls[2] = jsh;
shls[3] = ish;
for (ksh = 0; ksh <= ish; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
shls[0] = lsh;
shls[1] = ksh;
peri = eri + ao_loc[ksh] * nao + ao_loc[lsh];
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
(*intor)(peri, dims, shls, atm, natm, bas, nbas, env,
cintopt, cache);
} else {
for (ij = 0; ij < di*dj; ij++) {
for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) {
for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) {
peri[k*nao+l] = 0;
} }
peri += nao2;
}
}
} }
}
static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int nbas = envs->nbas;
const int *ao_loc = envs->ao_loc;
const CINTOpt *cintopt = envs->cintopt;
const int nao = ao_loc[nbas];
const size_t nao2 = nao * nao;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
int i, j, k, l, i0, j0, kl;
size_t ij0;
double *peri, *pbuf;
fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs);
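// buf now holds one nao x nao (k,l) block per (i,j) AO pair of this shell
// pair; copy the triangle kl0 <= ij0 of each block into the s8-packed eri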
for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) {
for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) {
if (i0 >= j0) {
ij0 = i0*(i0+1)/2 + j0;
peri = eri + ij0*(ij0+1)/2;
pbuf = buf + nao2 * (i*dj+j);
for (kl = 0, k = 0; k < i0; k++) {
for (l = 0; l <= k; l++, kl++) {
peri[kl] = pbuf[k*nao+l];
} }
// k == i0
for (l = 0; l <= j0; l++, kl++) {
peri[kl] = pbuf[k*nao+l];
}
}
} }
}
void GTO2e_cart_or_sph(int (*intor)(), double *eri, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nao = ao_loc[nbas];
CINTOpt *cintopt;
int2e_optimizer(&cintopt, atm, natm, bas, nbas, env);
IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL,
cintopt, 1};
CVHFOpt *vhfopt;
CVHFnr_optimizer(&vhfopt, atm, natm, bas, nbas, env);
vhfopt->fprescreen = CVHFnr_schwarz_cond;
int shls_slice[] = {0, nbas};
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, eri, ao_loc, nbas, envs, vhfopt, nao, di, cache_size)
{
int i, j, ij;
double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size));
#pragma omp for nowait schedule(dynamic, 2)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
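// decode the triangular pair index: ij = i*(i+1)/2 + j gives
// i = floor(sqrt(2*ij + 0.25) - 0.5); the 1e-7 guards against round-off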
i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
j = ij - (i*(i+1)/2);
store_ij(intor, eri, buf, i, j, vhfopt, &envs);
}
free(buf);
}
CVHFdel_optimizer(&vhfopt);
CINTdel_optimizer(&cintopt);
}
|
GB_bitmap_emult_template.c | //------------------------------------------------------------------------------
// GB_bitmap_emult_template: C = A.*B, C<M>=A.*B, and C<!M>=A.*B, C bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap. A and B are bitmap or full. M depends on the method
{
//--------------------------------------------------------------------------
// get C, A, and B
//--------------------------------------------------------------------------
const int8_t *restrict Ab = A->b ;
const int8_t *restrict Bb = B->b ;
const int64_t vlen = A->vlen ;
ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;
const bool A_iso = A->iso ;
const bool B_iso = B->iso ;
int8_t *restrict Cb = C->b ;
const int64_t cnz = GB_nnz_held (C) ;
#ifdef GB_ISO_EMULT
ASSERT (C->iso) ;
#else
ASSERT (!C->iso) ;
ASSERT (!(A_iso && B_iso)) ; // one of A or B can be iso, but not both
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
//--------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B: C is bitmap
//--------------------------------------------------------------------------
// TODO modify this method so it can modify C in-place, and also use the
// accum operator.
int64_t cnvals = 0 ;
if (ewise_method == GB_EMULT_METHOD5)
{
//----------------------------------------------------------------------
// Method5: C is bitmap, M is not present
//----------------------------------------------------------------------
// ------------------------------------------
// C = A .* B
// ------------------------------------------
// bitmap . bitmap bitmap (method: 5)
// bitmap . bitmap full (method: 5)
// bitmap . full bitmap (method: 5)
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (GBB (Ab, p) && GBB (Bb,p))
{
// C (i,j) = A (i,j) .* B (i,j)
#ifndef GB_ISO_EMULT
GB_GETA (aij, Ax, p, A_iso) ;
GB_GETB (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
cnvals += task_cnvals ;
}
}
else if (ewise_method == GB_EMULT_METHOD6)
{
//----------------------------------------------------------------------
// Method6: C is bitmap, !M is sparse or hyper
//----------------------------------------------------------------------
// ------------------------------------------
// C <!M>= A .* B
// ------------------------------------------
// bitmap sparse bitmap bitmap (method: 6)
// bitmap sparse bitmap full (method: 6)
// bitmap sparse full bitmap (method: 6)
// M is sparse and complemented. If M is sparse and not
// complemented, then C is constructed as sparse, not bitmap.
ASSERT (M != NULL) ;
ASSERT (Mask_comp) ;
ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
// C(i,j) = A(i,j) .* B(i,j) can only be computed where M(i,j) is
// not present in the sparse pattern of M, and where it is present
// but equal to zero.
//----------------------------------------------------------------------
// scatter M into the C bitmap
//----------------------------------------------------------------------
GB_bitmap_M_scatter_whole (C, M, Mask_struct, GB_BITMAP_M_SCATTER_SET_2,
M_ek_slicing, M_ntasks, M_nthreads, Context) ;
// C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1.
// These positions will not be computed in C(i,j). C(i,j) can only
// be modified where Cb [p] is zero.
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (Cb [p] == 0)
{
// M(i,j) is zero, so C(i,j) can be computed
if (GBB (Ab, p) && GBB (Bb, p))
{
// C (i,j) = A (i,j) .* B (i,j)
#ifndef GB_ISO_EMULT
GB_GETA (aij, Ax, p, A_iso) ;
GB_GETB (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
else
{
// M(i,j) == 1, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
}
else // if (ewise_method == GB_EMULT_METHOD7)
{
//----------------------------------------------------------------------
// Method7: C is bitmap; M is bitmap or full
//----------------------------------------------------------------------
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// bitmap bitmap bitmap bitmap (method: 7)
// bitmap bitmap bitmap full (method: 7)
// bitmap bitmap full bitmap (method: 7)
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// bitmap full bitmap bitmap (method: 7)
// bitmap full bitmap full (method: 7)
// bitmap full full bitmap (method: 7)
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// bitmap bitmap bitmap bitmap (method: 7)
// bitmap bitmap bitmap full (method: 7)
// bitmap bitmap full bitmap (method: 7)
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// bitmap full bitmap bitmap (method: 7)
// bitmap full bitmap full (method: 7)
// bitmap full full bitmap (method: 7)
ASSERT (GB_IS_BITMAP (M) || GB_IS_FULL (M)) ;
const int8_t *restrict Mb = M->b ;
const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
size_t msize = M->type->size ;
#undef GB_GET_MIJ
#define GB_GET_MIJ(p) \
bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ; \
if (Mask_comp) mij = !mij ; /* TODO: use ^ */
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
// M(i,j) is true, so C(i,j) can be computed
if (GBB (Ab, p) && GBB (Bb, p))
{
// C (i,j) = A (i,j) .* B (i,j)
#ifndef GB_ISO_EMULT
GB_GETA (aij, Ax, p, A_iso) ;
GB_GETB (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
else
{
// M(i,j) == 1, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
}
C->nvals = cnvals ;
}
|
convolutiondepthwise_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
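// main loop: each iteration computes a 2x2 tile of pack-4 outputs from
// four input rows; rows r1 and r2 are shared between the two output rows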
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
__builtin_prefetch(r3 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _sum10 = _bias0;
v4f32 _sum11 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
_sum10 = __msa_fmadd_w(_sum10, _k00, _r10);
_sum10 = __msa_fmadd_w(_sum10, _k01, _r11);
_sum10 = __msa_fmadd_w(_sum10, _k02, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k00, _r11);
_sum11 = __msa_fmadd_w(_sum11, _k01, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k02, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
_sum10 = __msa_fmadd_w(_sum10, _k10, _r20);
_sum10 = __msa_fmadd_w(_sum10, _k11, _r21);
_sum10 = __msa_fmadd_w(_sum10, _k12, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k10, _r21);
_sum11 = __msa_fmadd_w(_sum11, _k11, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k12, _r23);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
_sum10 = __msa_fmadd_w(_sum10, _k20, _r30);
_sum10 = __msa_fmadd_w(_sum10, _k21, _r31);
_sum10 = __msa_fmadd_w(_sum10, _k22, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k20, _r31);
_sum11 = __msa_fmadd_w(_sum11, _k21, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k22, _r33);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum10, outptr1, 0);
__msa_st_w((v4i32)_sum11, outptr1 + 4, 0);
outptr0 += 4 * 2;
outptr1 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
r3 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
__builtin_prefetch(r3 + 96);
v4f32 _sum0 = _bias0;
v4f32 _sum1 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
_sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
_sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
_sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
_sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
_sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
_sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
_sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr1, 0);
outptr0 += 4;
outptr1 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
}
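// advance past the 2-column kernel overhang, then skip one full input
// row so the next row pair starts two rows down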
r0 += 2 * 4 + w * 4;
r1 += 2 * 4 + w * 4;
r2 += 2 * 4 + w * 4;
r3 += 2 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
}
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
}
}
static void convdw3x3s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
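// per output row, stride 2 consumes 2*outw input pixels; tailstep skips
// the remainder of that row plus the next (stride-skipped) row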
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r03);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r13);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r23);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r24);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
GB_binop__iseq_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc32)
// C=scalar+B GB (_bind1st__iseq_fc32)
// C=scalar+B' GB (_bind1st_tran__iseq_fc32)
// C=A+scalar GB (_bind2nd__iseq_fc32)
// C=A'+scalar GB (_bind2nd_tran__iseq_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_iseq (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_iseq (x, y) ;
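// ISEQ ("is equal") returns the comparison result in the operand type:
// GB_FC32_iseq yields 1+0i when x == y and 0+0i otherwise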
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC32 || GxB_NO_ISEQ_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__iseq_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__iseq_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__iseq_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__iseq_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_iseq (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_iseq (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_iseq (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_iseq (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bcnn_avgpool_layer.c | /*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "bcnn_avgpool_layer.h"
#include <bh/bh_log.h>
#include <bh/bh_string.h>
#include "bcnn_net.h"
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
bcnn_status bcnn_add_avgpool_layer(bcnn_net *net, const char *src_id,
const char *dst_id) {
bcnn_node node = {0};
bcnn_tensor dst_tensor = {0};
if (net->num_nodes > 0) {
int is_src_node_found = 0;
for (int i = net->num_tensors - 1; i >= 0; --i) {
if (strcmp(net->tensors[i].name, src_id) == 0) {
bcnn_node_add_input(net, &node, i);
is_src_node_found = 1;
break;
}
}
BCNN_CHECK_AND_LOG(
net->log_ctx, is_src_node_found, BCNN_INVALID_PARAMETER,
"Avgpool layer: invalid input node name %s\n", src_id);
} else {
bcnn_node_add_input(net, &node, 0);
}
bcnn_tensor_set_shape(&dst_tensor,
net->tensors[node.src[0]].n, // batch size
net->tensors[node.src[0]].c, // depth
1, // height
1, // width
1);
bcnn_tensor_allocate(&dst_tensor, net->mode);
bh_strfill(&dst_tensor.name, dst_id);
// Add node to net
bcnn_net_add_tensor(net, dst_tensor);
// Add tensor output index to node
bcnn_node_add_output(net, &node, net->num_tensors - 1);
node.type = BCNN_LAYER_AVGPOOL;
node.forward = bcnn_forward_avgpool_layer;
node.backward = bcnn_backward_avgpool_layer;
bcnn_net_add_node(net, node);
char node_opname[256];
snprintf(node_opname, 256, BH_LOG_BOLDBLUE "[Avgpool]" BH_LOG_RESET);
BCNN_INFO(net->log_ctx,
"%-48s %-8s (%4d x%4d x%4d) -> %-8s (%4d x%4d x%4d)\n",
node_opname, net->tensors[node.src[0]].name,
net->tensors[node.src[0]].w, net->tensors[node.src[0]].h,
net->tensors[node.src[0]].c, net->tensors[node.dst[0]].name,
net->tensors[node.dst[0]].w, net->tensors[node.dst[0]].h,
net->tensors[node.dst[0]].c);
    return BCNN_SUCCESS;
}
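/* Illustrative use of the function above (a sketch added for clarity; the
 * tensor names are hypothetical): append a global average pooling node that
 * reads the tensor named "conv5" and writes a new tensor named "pool":
 *
 *     bcnn_add_avgpool_layer(net, "conv5", "pool");
 */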
void bcnn_forward_avgpool_layer_cpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
for (int b = 0; b < src_tensor->n; ++b) {
#pragma omp parallel for num_threads(net->num_threads)
for (int k = 0; k < src_tensor->c; ++k) {
int idx = k + b * src_tensor->c;
dst_tensor->data[idx] = 0;
for (int i = 0; i < src_tensor->h * src_tensor->w; ++i) {
int offset =
src_tensor->h * src_tensor->w * (k + b * src_tensor->c) + i;
dst_tensor->data[idx] += src_tensor->data[offset];
}
dst_tensor->data[idx] /= src_tensor->h * src_tensor->w;
}
}
return;
}
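/* Worked example for the forward pass above (illustration only): a single
 * 2x2 channel holding {1, 2, 3, 4} is reduced to one scalar,
 * (1 + 2 + 3 + 4) / 4 = 2.5, per channel and per batch entry. */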
void bcnn_forward_avgpool_layer(bcnn_net *net, bcnn_node *node) {
#ifdef BCNN_USE_CUDA
return bcnn_forward_avgpool_layer_gpu(net, node);
#else
return bcnn_forward_avgpool_layer_cpu(net, node);
#endif
}
void bcnn_backward_avgpool_layer_cpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
for (int b = 0; b < src_tensor->n; ++b) {
for (int k = 0; k < src_tensor->c; ++k) {
int idx = k + b * src_tensor->c;
for (int i = 0; i < src_tensor->h * src_tensor->w; ++i) {
int offset =
src_tensor->h * src_tensor->w * (k + b * src_tensor->c) + i;
src_tensor->grad_data[offset] +=
dst_tensor->grad_data[idx] /
(src_tensor->h * src_tensor->w);
}
}
}
return;
}
void bcnn_backward_avgpool_layer(bcnn_net *net, bcnn_node *node) {
#ifdef BCNN_USE_CUDA
    return bcnn_backward_avgpool_layer_gpu(net, node);
#else
    return bcnn_backward_avgpool_layer_cpu(net, node);
#endif
}
|
GB_unaryop__lnot_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int32
// op(A') function: GB_tran__lnot_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_int32
(
uint32_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zgbtrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbtrs
*
 * Solves a system of linear equations A * X = B with a general band matrix A,
 * using the LU factorization computed by plasma_zgbtrf.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_zgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[in] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgbtrs
* @sa plasma_cgbtrs
* @sa plasma_dgbtrs
* @sa plasma_sgbtrs
* @sa plasma_zpbtrf
*
******************************************************************************/
int plasma_zgbtrs(plasma_enum_t trans, int n, int kl, int ku, int nrhs,
plasma_complex64_t *pAB, int ldab,
int *ipiv,
plasma_complex64_t *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (kl < 0) {
plasma_error("illegal value of kd");
return -3;
}
if (ku < 0) {
plasma_error("illegal value of ku");
return -4;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -5;
}
if (ldab < imax(1, 1+kl+ku)) {
plasma_error("illegal value of ldab");
return -7;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -10;
}
// quick return
if (imax(n, nrhs) == 0)
return PlasmaSuccess;
// Set tiling parameters.
int nb = plasma->nb;
// Initialize tile matrix descriptors.
plasma_desc_t AB;
plasma_desc_t B;
int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
int tkl = (kl+nb-1)/nb; // number of tiles in lower band (not including diagonal)
int lm = (tku+tkl+1)*nb; // since we use zgetrf on panel, we pivot back within panel.
// this could fill the last tile of the panel,
// and we need extra NB space on the bottom
int retval;
retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral,
nb, nb, lm, n, 0, 0, n, n, kl, ku,
&AB);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_band_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&AB);
return retval;
}
// Create sequence.
plasma_sequence_t *sequence = NULL;
retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }
// Initialize request.
plasma_request_t request = PlasmaRequestInitializer;
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zpb2desc(pAB, ldab, AB, sequence, &request);
plasma_omp_zge2desc(pB, ldb, B, sequence, &request);
// Call the tile async function.
plasma_omp_zgbtrs(trans, AB, ipiv, B, sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
}
// implicit synchronization
// Free matrix A in tile layout.
plasma_desc_destroy(&AB);
plasma_desc_destroy(&B);
// Return status.
int status = sequence->status;
plasma_sequence_destroy(sequence);
return status;
}
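// Illustrative call sequence (a sketch added for clarity, not part of
// PLASMA's sources; assumes AB already holds the band LU factors and ipiv
// the pivots produced by plasma_zgbtrf, whose own argument list is
// documented with that routine):
//
//     plasma_zgbtrs(PlasmaNoTrans, n, kl, ku, nrhs, AB, ldab, ipiv, B, ldb);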
/***************************************************************************//**
*
* @ingroup plasma_gbtrs
*
 * Solves a system of linear equations using a previously computed
 * LU factorization of a band matrix.
* Non-blocking tile version of plasma_zgbtrs().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
 * @param[in] AB
 *          Details of the LU factorization of the band matrix A, as
 *          computed by plasma_zgbtrf.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgbtrs
* @sa plasma_omp_zgbtrs
* @sa plasma_omp_cgbtrs
* @sa plasma_omp_dgbtrs
* @sa plasma_omp_sgbtrs
* @sa plasma_omp_zgbtrf
*
******************************************************************************/
void plasma_omp_zgbtrs(plasma_enum_t trans, plasma_desc_t AB, int *ipiv, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(AB) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (AB.n == 0 || B.n == 0)
return;
// Call the parallel functions.
if (trans == PlasmaNoTrans) {
plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
PlasmaUnit,
1.0, AB,
B,
ipiv,
sequence, request);
plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
PlasmaNonUnit,
1.0, AB,
B,
ipiv,
sequence, request);
}
else {
plasma_pztbsm(PlasmaLeft, PlasmaUpper, trans,
PlasmaNonUnit,
1.0, AB,
B,
ipiv,
sequence, request);
plasma_pztbsm(PlasmaLeft, PlasmaLower, trans,
PlasmaUnit,
1.0, AB,
B,
ipiv,
sequence, request);
}
}
|
GB_binop__bset_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint32)
// C=scalar+B GB (_bind1st__bset_uint32)
// C=scalar+B' GB (_bind1st_tran__bset_uint32)
// C=A+scalar GB (_bind2nd__bset_uint32)
// C=A'+scalar GB (_bind2nd_tran__bset_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint32_t, 32)
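// (informal gloss added for readability, not part of the generated file:
// GB_BITSET evaluates to aij with the single bit selected by bij set to 1,
// leaving all other bits unchanged)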
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT32 || GxB_NO_BSET_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bset_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bset_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bset_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bset_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, uint32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bset_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, uint32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
BenchUtils.h | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
#include <functional>
#include <vector>
#include <immintrin.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "./AlignedVec.h"
namespace fbgemm {
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);
aligned_vector<float> getRandomSparseVector(
unsigned size,
float fractionNonZeros = 1.0);
void llc_flush(std::vector<char>& llc);
// Same as omp_get_max_threads() when OpenMP is available, otherwise 1
int fbgemm_get_max_threads();
// Same as omp_get_num_threads() when OpenMP is available, otherwise 1
int fbgemm_get_num_threads();
// Same as omp_get_thread_num() when OpenMP is available, otherwise 0
int fbgemm_get_thread_num();
template <typename T>
void cache_evict(const T& vec) {
auto const size = vec.size();
auto const elemSize = sizeof(typename T::value_type);
auto const dataSize = size * elemSize;
const char* data = reinterpret_cast<const char*>(vec.data());
constexpr int CACHE_LINE_SIZE = 64;
for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) {
_mm_clflush(&data[i]);
}
}
/**
* Parse application command line arguments
*
*/
int parseArgumentInt(
int argc,
const char* argv[],
const char* arg,
int non_exist_val,
int def_val);
bool parseArgumentBool(
int argc,
const char* argv[],
const char* arg,
bool def_val);
namespace {
struct empty_flush {
void operator()() const {}
};
} // namespace
/**
 * @param fn functor to execute
 * @param fe data eviction functor
*/
template <class Fn, class Fe = std::function<void()>>
double measureWithWarmup(
Fn&& fn,
int warmupIterations,
int measuredIterations,
const Fe& fe = empty_flush(),
bool useOpenMP = false) {
for (int i = 0; i < warmupIterations; ++i) {
// Evict data first
fe();
fn();
}
double ttot = 0.0;
#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
#endif
for (int i = 0; i < measuredIterations; ++i) {
int thread_id = 0;
std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
#ifdef _OPENMP
if (useOpenMP) {
thread_id = omp_get_thread_num();
}
#endif
if (thread_id == 0) {
fe();
}
#ifdef _OPENMP
if (useOpenMP) {
#pragma omp barrier
}
#endif
start = std::chrono::high_resolution_clock::now();
fn();
end = std::chrono::high_resolution_clock::now();
auto dur =
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);
if (thread_id == 0) {
// TODO: measure load imbalance
ttot += dur.count();
}
}
return ttot / 1e9 / measuredIterations;
}
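// Usage sketch (added illustration, not part of the original header): time a
// kernel with cold caches by pairing measureWithWarmup with cache_evict; the
// buffer size and iteration counts below are arbitrary.
//
//     aligned_vector<float> buf(1 << 20);
//     randFill(buf, 0.0f, 1.0f);
//     double seconds = measureWithWarmup(
//         [&]() { /* kernel under test, reading buf */ },
//         /*warmupIterations=*/5,
//         /*measuredIterations=*/20,
//         [&]() { cache_evict(buf); });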
} // namespace fbgemm
|
GB_binop__iseq_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_uint64
// A.*B function (eWiseMult): GB_AemultB__iseq_uint64
// A*D function (colscale): GB_AxD__iseq_uint64
// D*A function (rowscale): GB_DxB__iseq_uint64
// C+=B function (dense accum): GB_Cdense_accumB__iseq_uint64
// C+=b function (dense accum): GB_Cdense_accumb__iseq_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint64
// C=scalar+B GB_bind1st__iseq_uint64
// C=scalar+B' GB_bind1st_tran__iseq_uint64
// C=A+scalar GB_bind2nd__iseq_uint64
// C=A'+scalar GB_bind2nd_tran__iseq_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT64 || GxB_NO_ISEQ_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__iseq_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__iseq_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__iseq_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__iseq_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cloudkeychain_fmt_plug.c | /* 1Password Cloud Keychain cracker patch for JtR. Hacked together during
* April of 2013 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru.kholia at gmail.com>,
* Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and Copyright (c) 2012
* magnum, and it is hereby released to the general public under the following
* terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* This software is based on "onepasswordpy" project but no actual code is
* borrowed from it.
*
* "onepasswordpy" project is at https://github.com/Roguelazer/onepasswordpy
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cloud_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cloud_keychain);
#else
#include <string.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "stdint.h"
#include "sha2.h"
#include "pbkdf2_hmac_sha512.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "cloudkeychain"
#define FORMAT_NAME "1Password Cloud Keychain"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define HASH_LENGTH 64
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 111
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 32
#define IVLEN 16
#define CTLEN 2048
#define EHMLEN 32
#define PAD_SIZE 128
static struct fmt_tests cloud_keychain_tests[] = {
{"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b", "fred"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned int saltlen;
unsigned char salt[SALTLEN];
unsigned int iterations;
unsigned int masterkeylen;
unsigned char masterkey[CTLEN];
unsigned int plaintextlen;
unsigned int ivlen;
unsigned char iv[32];
unsigned int cryptextlen;
unsigned char cryptext[CTLEN];
unsigned int expectedhmaclen;
unsigned char expectedhmac[EHMLEN];
unsigned int hmacdatalen;
unsigned char hmacdata[CTLEN];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len;
if (strncmp(ciphertext, "$cloudkeychain$", 15) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 15;
if ((p = strtokm(ctcopy, "$")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlenl(p)/2 != len)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iterations */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* masterkey length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* masterkey */
goto err;
if (hexlenl(p)/2 != len)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* plaintext length */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > IVLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv */
goto err;
if (hexlenl(p) / 2 != len)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* cryptext length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > CTLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* cryptext */
goto err;
if (hexlenl(p)/2 != len)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > EHMLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac */
goto err;
if (hexlenl(p)/2 != len)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > CTLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata */
goto err;
if (hexlenl(p)/2 != len)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 15; /* skip over "$cloudkeychain$" */
p = strtokm(ctcopy, "$");
cs.saltlen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.saltlen; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.iterations = atou(p);
p = strtokm(NULL, "$");
cs.masterkeylen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.masterkeylen; i++)
cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.plaintextlen = atou(p);
p = strtokm(NULL, "$");
cs.ivlen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.ivlen; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.cryptextlen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.cryptextlen; i++)
cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.expectedhmaclen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.expectedhmaclen; i++)
cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.hmacdatalen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.hmacdatalen; i++)
cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
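/* Descriptive note (added for clarity): this is the standard HMAC
 * construction, ret = SHA256((pass ^ opad) || SHA256((pass ^ ipad) || salt
 * [|| add])), where ipad/opad are the usual 0x36/0x5c pads and the optional
 * 32-bit big-endian 'add' plays the role of the PBKDF2 block counter. */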
static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt,
uint32_t saltlen, uint32_t add, uint64_t * ret)
{
uint8_t i, ipad[64], opad[64];
SHA256_CTX ctx;
memset(ipad, 0x36, 64);
memset(opad, 0x5c, 64);
for (i = 0; i < passlen; i++) {
ipad[i] ^= pass[i];
opad[i] ^= pass[i];
}
SHA256_Init(&ctx);
SHA256_Update(&ctx, ipad, 64);
SHA256_Update(&ctx, salt, saltlen);
if (add > 0) {
#if ARCH_LITTLE_ENDIAN
add = JOHNSWAP(add);
#endif
SHA256_Update(&ctx, &add, 4); }
SHA256_Final((uint8_t *) ret, &ctx);
SHA256_Init(&ctx);
SHA256_Update(&ctx, opad, 64);
SHA256_Update(&ctx, (uint8_t *) ret, 32);
SHA256_Final((uint8_t *) ret, &ctx);
}
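/* Note (added for clarity): the 64-byte PBKDF2 output is treated as two
 * halves; bytes 32..63 are used below as the HMAC-SHA256 key to check
 * hmacdata against expectedhmac, so no decryption of the first half is
 * needed to test a password guess. */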
static int ckcdecrypt(unsigned char *key)
{
uint64_t tmp[8];
hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, tmp);
if (!memcmp(tmp, cur_salt->expectedhmac, 32))
return 1;
else
return 0;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifdef SSE_GROUP_SZ_SHA512
int lens[SSE_GROUP_SZ_SHA512], i;
unsigned char *pin[SSE_GROUP_SZ_SHA512];
uint64_t key[SSE_GROUP_SZ_SHA512][8];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (ARCH_WORD_32*)(key[i]);
}
pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0);
for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i)
cracked[index+i] = ckcdecrypt((unsigned char*)(key[i]));
#else
uint64_t key[8];
pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),
cur_salt->salt, cur_salt->saltlen,
cur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0);
cracked[index] = ckcdecrypt((unsigned char*)key);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void cloud_keychain_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int)my_salt->iterations;
}
struct fmt_main fmt_cloud_keychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
cloud_keychain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
cloud_keychain_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
initAtoms.c | /// \file
/// Initialize the atom configuration.
#include "initAtoms.h"
#include <math.h>
#include <assert.h>
#include "constants.h"
#include "decomposition.h"
#include "parallel.h"
#include "random.h"
#include "linkCells.h"
#include "timestep.h"
#include "memUtils.h"
#include "performanceTimers.h"
static void computeVcm(SimFlat* s, real_t vcm[3]);
/// \details
/// Call functions such as createFccLattice and setTemperature to set up
/// initial atom positions and momenta.
Atoms* initAtoms(LinkCell* boxes)
{
Atoms* atoms = comdMalloc(sizeof(Atoms));
int maxTotalAtoms = MAXATOMS*boxes->nTotalBoxes;
//#pragma sst delete
{
atoms->gid = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->iSpecies = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->r = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->p = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->f = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->U = (real_t*)comdMalloc(maxTotalAtoms*sizeof(real_t));
}
atoms->nLocal = 0;
atoms->nGlobal = 0;
#pragma sst compute
for (int iOff = 0; iOff < maxTotalAtoms; iOff++)
{
atoms->gid[iOff] = 0;
atoms->iSpecies[iOff] = 0;
zeroReal3(atoms->r[iOff]);
zeroReal3(atoms->p[iOff]);
zeroReal3(atoms->f[iOff]);
atoms->U[iOff] = 0.;
}
return atoms;
}
void destroyAtoms(Atoms *atoms)
{
freeMe(atoms,gid);
freeMe(atoms,iSpecies);
freeMe(atoms,r);
freeMe(atoms,p);
freeMe(atoms,f);
freeMe(atoms,U);
comdFree(atoms);
}
/// Creates atom positions on a face centered cubic (FCC) lattice with
/// nx * ny * nz unit cells and lattice constant lat.
/// Sets momenta to zero.
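/// For example, nx = ny = nz = 2 with the 4-atom basis below yields
/// 4 * 2 * 2 * 2 = 32 atoms in total.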
void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat* s)
{
const real_t* localMin = s->domain->localMin; // alias
const real_t* localMax = s->domain->localMax; // alias
int nb = 4; // number of atoms in the basis
real3 basis[4] = { {0.25, 0.25, 0.25},
{0.25, 0.75, 0.75},
{0.75, 0.25, 0.75},
{0.75, 0.75, 0.25} };
// create and place atoms
int begin[3];
int end[3];
for (int ii=0; ii<3; ++ii)
{
begin[ii] = floor(localMin[ii]/lat);
end[ii] = ceil (localMax[ii]/lat);
}
real_t px,py,pz;
px=py=pz=0.0;
#pragma sst compute
for (int ix=begin[0]; ix<end[0]; ++ix)
for (int iy=begin[1]; iy<end[1]; ++iy)
for (int iz=begin[2]; iz<end[2]; ++iz)
for (int ib=0; ib<nb; ++ib)
{
real_t rx = (ix+basis[ib][0]) * lat;
real_t ry = (iy+basis[ib][1]) * lat;
real_t rz = (iz+basis[ib][2]) * lat;
if (rx < localMin[0] || rx >= localMax[0]) continue;
if (ry < localMin[1] || ry >= localMax[1]) continue;
if (rz < localMin[2] || rz >= localMax[2]) continue;
int id = ib+nb*(iz+nz*(iy+ny*(ix)));
putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz);
}
#pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz))
s->atoms->nGlobal = 0;
if (getMyRank() == 0)
printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n",
nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal);
#pragma sst init s->atoms->nGlobal / getNRanks()
s->atoms->nLocal = s->atoms->nLocal;
s->boxes->nTotalAtoms = s->atoms->nLocal;
// set total atoms in simulation
startTimer(commReduceTimer);
addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1);
stopTimer(commReduceTimer);
#pragma sst delete
assert(s->atoms->nGlobal == nb*nx*ny*nz);
}
/// Sets the center of mass velocity of the system.
/// \param [in] newVcm The desired center of mass velocity.
void setVcm(SimFlat* s, real_t newVcm[3])
{
real_t oldVcm[3];
computeVcm(s, oldVcm);
real_t vShift[3];
vShift[0] = (newVcm[0] - oldVcm[0]);
vShift[1] = (newVcm[1] - oldVcm[1]);
vShift[2] = (newVcm[2] - oldVcm[2]);
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iSpecies = s->atoms->iSpecies[iOff];
real_t mass = s->species[iSpecies].mass;
s->atoms->p[iOff][0] += mass * vShift[0];
s->atoms->p[iOff][1] += mass * vShift[1];
s->atoms->p[iOff][2] += mass * vShift[2];
}
}
}
/// Sets the temperature of system.
///
/// Selects atom velocities randomly from a Boltzmann (equilibrium)
/// distribution that corresponds to the specified temperature. This
/// random process will typically result in a small but non-zero center
/// of mass velocity and a small difference from the specified
/// temperature. For typical MD runs these small differences are
/// unimportant. However, to avoid possible confusion, we set the center
/// of mass velocity to zero and scale the velocities to exactly match
/// the input temperature.
void setTemperature(SimFlat* s, real_t temperature)
{
s->initialTemp = temperature;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// set initial velocities for the distribution
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iType = s->atoms->iSpecies[iOff];
real_t mass = s->species[iType].mass;
real_t sigma = sqrt(kB_eV * temperature/mass);
uint64_t seed = mkSeed(s->atoms->gid[iOff], 123);
s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed);
}
}
// compute the resulting temperature
// kinetic energy = 3/2 kB * Temperature
if (temperature == 0.0) return;
real_t vZero[3] = {0., 0., 0.};
setVcm(s, vZero);
kineticEnergy(s);
real_t temp = (s->eKinetic/s->atoms->nGlobal)/kB_eV/1.5;
// scale the velocities to achieve the target temperature
real_t scaleFactor = sqrt(temperature/temp);
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
s->atoms->p[iOff][0] *= scaleFactor;
s->atoms->p[iOff][1] *= scaleFactor;
s->atoms->p[iOff][2] *= scaleFactor;
}
}
kineticEnergy(s);
temp = s->eKinetic/s->atoms->nGlobal/kB_eV/1.5;
}
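/// Minimal sketch (illustrative helper, not part of CoMD): the instantaneous
/// temperature computed above follows from eKinetic = (3/2) * kB * T per atom.
static inline real_t temperatureFromKineticEnergy(real_t eKinetic, int nGlobal)
{
   return (eKinetic / nGlobal) / kB_eV / 1.5;
}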
/// Add a random displacement to the atom positions.
/// Atoms are displaced by a random distance in the range
/// [-delta, +delta] along each axis.
/// \param [in] delta The maximum displacement (along each axis).
void randomDisplacements(SimFlat* s, real_t delta)
{
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
uint64_t seed = mkSeed(s->atoms->gid[iOff], 457);
s->atoms->r[iOff][0] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][1] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][2] += (2.0*lcg61(&seed)-1.0) * delta;
}
}
}
/// Computes the center of mass velocity of the system.
void computeVcm(SimFlat* s, real_t vcm[3])
{
real_t vcmLocal[4] = {0., 0., 0., 0.};
real_t vcmSum[4] = {0., 0., 0., 0.};
real_t v0 = 0.0;
real_t v1 = 0.0;
real_t v2 = 0.0;
real_t v3 = 0.0;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// sum the momenta and particle masses
#pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2) reduction(+:v3)
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
v0 += s->atoms->p[iOff][0];
v1 += s->atoms->p[iOff][1];
v2 += s->atoms->p[iOff][2];
int iSpecies = s->atoms->iSpecies[iOff];
v3 += s->species[iSpecies].mass;
}
}
vcmLocal[0] = v0;
vcmLocal[1] = v1;
vcmLocal[2] = v2;
vcmLocal[3] = v3;
startTimer(commReduceTimer);
addRealParallel(vcmLocal, vcmSum, 4);
stopTimer(commReduceTimer);
real_t totalMass = vcmSum[3];
vcm[0] = vcmSum[0]/totalMass;
vcm[1] = vcmSum[1]/totalMass;
vcm[2] = vcmSum[2]/totalMass;
}
|
heller_gaussian.h | #ifndef METHODS_HELLER_GAUSSIAN_H
#define METHODS_HELLER_GAUSSIAN_H
#include "heller_cwa.h"
namespace method {
// Use the method name as a sub-namespace for the
// implementation details.
namespace heller_gaussian {
namespace details {
struct HellerParam {
// exp(a[0] p^2 + a[1] p + a[2] p q + a[3] q + a[4] q^2 + a[5])
// alpha = 0.5 / a[0]
// delta = Im[a[1]]
// epsilon = Im[a[2]]
// p_bar = Re[a[1] * a[0]]
arma::cx_vec a;
math::Polynomial <cx_double> V_eff_0;
double mass;
};
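// Illustrative sketch (hypothetical helper, not used elsewhere): evaluates the
// quadratic exponent a[0] p^2 + a[1] p + a[2] p q + a[3] q + a[4] q^2 + a[5]
// that defines the Heller Gaussian documented above.
inline cx_double heller_exponent(const arma::cx_vec & a,
                                 const double q,
                                 const double p) {
  return a(0) * p * p + a(1) * p + a(2) * p * q
       + a(3) * q + a(4) * q * q + a(5);
}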
inline
double I_function(
const arma::cx_vec & a,
const arma::cx_vec & a_derivatives,
const math::Polynomial <cx_double> & V_eff_0,
const double mass) {
const auto E_func_abs = heller_cwa::details::E_function(a, a_derivatives,
V_eff_0, mass).abs();
const auto E_func_squared = E_func_abs * E_func_abs;
const auto gaussian_abs = heller_cwa::details::heller_gaussian(a).abs();
const auto gaussian_squared = gaussian_abs * gaussian_abs;
return gaussian_squared.integral(E_func_squared);
}
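// In symbols (sketch of the above): I(a, a') = integral over phase space of
// |E(q,p)|^2 * |psi_a(q,p)|^2, i.e. the Gaussian-weighted squared magnitude of
// E_function (assumed here to be the residual being driven to zero), which
// a_derivative() below minimises with respect to a'.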
inline
arma::vec I_function_derivative(
const arma::cx_vec & a,
const arma::cx_vec & a_derivatives,
const math::Polynomial <cx_double> & V_eff_0,
const double mass
) {
arma::vec result(12);
const auto E_func = heller_cwa::details::E_function(a, a_derivatives, V_eff_0,
mass);
const auto polynomial_term_list = lmat{{{0, 0, 1, 1, 2, 0}, {2, 1, 1, 0, 0, 0}}};
const auto gaussian_abs = heller_cwa::details::heller_gaussian(a).abs();
const auto gaussian_squared = gaussian_abs * gaussian_abs;
#pragma omp parallel for
for (arma::uword i = 0; i < 6; i++) {
const lvec E_derivative_term = polynomial_term_list.col(i);
const auto E_derivative =
math::polynomial::Term<double>(1.0, E_derivative_term);
result(i) = -2.0 * std::real(
gaussian_squared.integral(E_func.conj() * E_derivative));
result(i + 6) = 2.0 * std::imag(
gaussian_squared.integral(E_func.conj() * E_derivative));
}
return result;
}
inline
double I_function_gsl_wrapper(
const gsl_vector * a_derivatives,
void * param
) {
const HellerParam heller_param = *(HellerParam *) param;
const arma::cx_vec a = heller_param.a;
const arma::vec a_derivatives_all = gsl::convert_vec(a_derivatives);
const arma::cx_vec a_derivatives_arma =
arma::cx_vec{a_derivatives_all.rows(0, 5),
a_derivatives_all.rows(6, 11)};
return I_function(a,
a_derivatives_arma,
heller_param.V_eff_0,
heller_param.mass);
}
inline
void I_function_derivative_gsl_wrapper(
const gsl_vector * a_derivatives,
void * param,
gsl_vector * g
) {
const HellerParam heller_param = *(HellerParam *) param;
const arma::cx_vec a = heller_param.a;
const arma::vec a_derivatives_all = gsl::convert_vec(a_derivatives);
const arma::cx_vec a_derivatives_arma =
arma::cx_vec{a_derivatives_all.rows(0, 5),
a_derivatives_all.rows(6, 11)};
const auto result_pointer =
gsl::convert_vec(I_function_derivative(a,
a_derivatives_arma,
heller_param.V_eff_0,
heller_param.mass));
gsl_vector_memcpy(g, result_pointer);
gsl_vector_free(result_pointer);
}
inline
void I_function_fdf_gsl_wrapper(
const gsl_vector * a_derivatives,
void * param,
double * f,
gsl_vector * g
) {
I_function_derivative_gsl_wrapper(a_derivatives, param, g);
*f = I_function_gsl_wrapper(a_derivatives, param);
}
inline
arma::cx_vec a_derivative(HellerParam input,
const double initial_step_size,
const double tolerance,
const double gradient_tolerance,
const size_t total_steps) {
/* allocate memory for minimization process */
const auto minimizer_type = gsl_multimin_fdfminimizer_vector_bfgs2;
auto minimizer_environment = gsl_multimin_fdfminimizer_alloc(minimizer_type,
12);
/* assigning function to minimizer object */
gsl_multimin_function_fdf minimizer_object;
minimizer_object.f = &I_function_gsl_wrapper;
minimizer_object.df = &I_function_derivative_gsl_wrapper;
minimizer_object.fdf = &I_function_fdf_gsl_wrapper;
minimizer_object.n = 12;
minimizer_object.params = (void *) &input;
/* starting point */
const auto a_derivatives = gsl_vector_calloc(12);
/* set environment */
gsl_multimin_fdfminimizer_set(minimizer_environment,
&minimizer_object, a_derivatives,
initial_step_size, tolerance);
size_t iter = 0;
int status = GSL_CONTINUE;
do {
iter++;
status = gsl_multimin_fdfminimizer_iterate(minimizer_environment);
if (status) {
throw Error(gsl_strerror(status));
}
status = gsl_multimin_test_gradient(minimizer_environment->gradient,
gradient_tolerance);
if (status == GSL_SUCCESS) {
const arma::vec result = gsl::convert_vec(minimizer_environment->x);
gsl_multimin_fdfminimizer_free(minimizer_environment);
gsl_vector_free(a_derivatives);
return arma::cx_vec{result.rows(arma::span(0, 5)),
result.rows(arma::span(6, 11))};
}
  } while (status == GSL_CONTINUE && iter < total_steps);
  // Not converged within total_steps: release the GSL resources before
  // returning the fallback coefficients.
  gsl_multimin_fdfminimizer_free(minimizer_environment);
  gsl_vector_free(a_derivatives);
  return arma::ones<arma::cx_vec>(6) * 2.8375;
}
} // namespace details
struct State {
public:
arma::mat points;
arma::vec weights;
arma::vec masses;
// Establish an easy way to construct your State
template<typename PhaseSpaceDistribution>
State(const PhaseSpaceDistribution & initial,
const arma::uvec & grid,
const arma::mat & range,
const arma::vec & masses) :
points(math::space::points_generate(grid, range)),
weights(arma::real(at(initial, points))),
masses(masses) {
if (grid.n_rows != range.n_rows) {
throw Error("Different dimension between the grid and the range");
}
if (grid.n_rows != 2 * masses.n_rows) {
throw Error("Different dimension between the grid and the masses");
}
}
template<typename PhaseSpaceDistribution>
State(const PhaseSpaceDistribution & initial,
const arma::uvec & grid,
const arma::mat & range) :
points(math::space::points_generate(grid, range)),
weights(arma::real(at(initial, points))),
masses(arma::ones<arma::vec>(grid.n_elem / 2)) {
if (grid.n_elem % 2 != 0) {
throw Error("Odd number of dimension - it is not likely a phase space");
}
if (grid.n_rows != range.n_rows) {
throw Error("Different dimension between the grid and the range");
}
if (grid.n_rows != 2 * masses.n_rows) {
throw Error("Different dimension between the grid and the masses");
}
}
inline
State(const arma::mat & points,
const arma::vec & weights,
const arma::vec & masses) :
points(points),
weights(weights),
masses(masses) {
if (points.n_cols != weights.n_elem) {
throw Error("Different number of points and corresponding weights");
}
if (points.n_rows != 2 * masses.n_rows) {
throw Error("Different dimension between the points and the masses");
}
}
inline
arma::uword dim() const {
return points.n_rows / 2;
}
inline
State normalise() const {
return State(this->points, this->weights / arma::sum(this->weights),
this->masses);
}
template<typename Function>
arma::vec expectation(const std::vector<Function> & function) const {
arma::vec result(function.size());
#pragma omp parallel for
for (arma::uword i = 0; i < result.n_elem; i++) {
if (function[i].dim() != this->dim() * 2) {
throw Error(
"The dimension of the function is invalid for the calculation of expectation");
}
result(i) = arma::dot(at(function[i], this->points), weights) /
arma::sum(weights);
}
return result;
}
template<typename Function>
double expectation(const Function & function) const {
const arma::vec result = at(function, this->points);
return arma::dot(result, weights) / arma::sum(weights);
}
inline
arma::vec positional_expectation() const {
arma::uword dim = this->dim();
return this->points.rows(0, dim - 1) * this->weights /
arma::sum(this->weights);
}
inline
arma::vec momentum_expectation() const {
arma::uword dim = this->dim();
return this->points.rows(dim, 2 * dim - 1) * this->weights /
arma::sum(this->weights);
}
State operator+(const State & B) const {
if (!arma::approx_equal(this->weights, B.weights, "abs_diff", 1e-16) ||
!arma::approx_equal(this->masses, B.masses, "abs_diff", 1e-16)) {
throw Error("Different cwa states are being added");
}
return State(this->points + B.points, this->weights, this->masses);
}
State operator*(const double B) const {
return State(this->points * B, this->weights, this->masses);
}
};
template<typename Potential>
struct Operator {
private:
PropagationType type = Classic;
public:
Potential potential;
Operator(const State & state,
const Potential & potential) :
potential(potential) {}
inline
PropagationType propagation_type() const {
return Classic;
}
State operator()(const State & state) const {
arma::mat p_submatrix = state.points.rows(state.dim(), 2 * state.dim() - 1);
p_submatrix.each_col() /= state.masses;
const arma::mat change_list =
arma::join_cols(p_submatrix,
cwa::details::force(potential,
state.points.rows(0, state.dim() -
1)));
return State(change_list, state.weights, state.masses);
}
};
} // namespace heller_gaussian
} // namespace method
#endif //METHODS_HELLER_GAUSSIAN_H
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
struct softmax_fwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float a, AType b) {
return AType(expf(a)/b);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double a, AType b) {
return AType(exp(a)/b);
}
};
struct log_softmax_fwd {
template<typename DType>
MSHADOW_XINLINE static float Map(DType a, float b) {
return a - logf(b);
}
template<typename DType>
MSHADOW_XINLINE static double Map(DType a, double b) {
return a - log(b);
}
};
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length == nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < M; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t len = static_cast<index_t>(length[i]);
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < len; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
for (index_t j = len; j < M; ++j) {
out[base + j*sa] = OType(0.0f);
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
}
}
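// Minimal single-row reference of the computation above (illustrative sketch,
// not part of the operator: unit stride, no length masking, plain softmax):
// y_i = exp((x_i - max_j x_j) / T) / sum_j exp((x_j - max_j x_j) / T).
inline void softmax_row_reference(const float* x, float* y,
                                  const int n, const float T) {
  float mmax = x[0];
  for (int i = 1; i < n; ++i) mmax = std::max(mmax, x[i]);
  float sum = 0.f;
  for (int i = 0; i < n; ++i) sum += std::exp((x[i] - mmax) / T);
  for (int i = 0; i < n; ++i) y[i] = std::exp((x[i] - mmax) / T) / sum;
}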
struct softmax_bwd {
template<typename DType, typename AType>
MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
return AType(out * (ograd - sum));
}
};
struct log_softmax_bwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
return AType(ograd - expf(out)*sum);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
return AType(ograd - exp(out)*sum);
}
};
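// For reference, the chain rule the two functors above implement, assuming the
// usual pairing where the OP1 reduction in SoftmaxGrad accumulates ograd * out
// for softmax and plain ograd for log-softmax (sum below is that reduction):
//   softmax:     dL/dx_i = y_i * (g_i - sum_j g_j * y_j)
//   log-softmax: dL/dx_i = g_i - exp(y_i) * sum_j g_j
// where g is the incoming gradient (ograd) and y the forward output.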
template<typename OP1, typename OP2, int Req, bool negate,
typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape,
int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length != nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
index_t len = static_cast<index_t>(length[i]);
AType sum = AType(0);
for (index_t j = 0; j < len; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
AType sum = AType(0);
for (index_t j = 0; j < M; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
}
}
#ifdef __CUDACC__
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
index_t M, int axis, Shape<ndim> sshape,
Shape<ndim> stride, const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
red::maximum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::maximum, x_bits>(smem);
__syncthreads();
DType smax = smem[0];
__syncthreads();
red::sum::SetInitValue(smem[x]);
DType val;
for (index_t i = x; i < len; i += x_size) {
val = negate ? -in[base + i*sa]:in[base + i*sa];
smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
for (index_t i = x; i < M; i += x_size) {
val = negate ? -in[base + i*sa] : in[base + i*sa];
out[base + i*sa] =
(i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
}
}
const int softmax_threads_per_block = 512;
template<typename OP, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
const index_t M, const double temperature,
const int rows_per_block, const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
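  // Worked example (illustrative): DType = float (4 bytes) loaded through a
  // 16-byte LType gives entries_per_load = 4, so each row of M floats is read
  // with row_length = M / 4 vectorized loads.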
const LType* in_aligned = reinterpret_cast<const LType*>(in);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
__syncthreads();
DType my_max_value;
red::maximum::SetInitValue(my_max_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
}
scratch[threadIdx.x] = my_max_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return ::max(x, y); });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
AType my_sum;
red::sum::SetInitValue(my_sum);
for (index_t i = my_id; i < len; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
scratch[threadIdx.x] = my_sum;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] += scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y;});
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
DType(0.0f);
}
__syncthreads();
LType* out_aligned = reinterpret_cast<LType*>(out);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
}
}
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
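  // e.g. (illustrative) DType = float gives max_opt_M = 20 * 1024 / 4 = 5120
  // elements per softmax row.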
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_compute_kernel<OP, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
} else {
softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
}
}
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
DType *igrad, const IType *length,
const index_t M,
const double temperature,
const int rows_per_block,
const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const LType* out_aligned = reinterpret_cast<const LType*>(out);
const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
__syncthreads();
AType my_sum_value;
red::sum::SetInitValue(my_sum_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_sum_value += OP1::Map(row[i + M], row[i]);
}
scratch[threadIdx.x] = my_sum_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y; });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val =
negate ?
-OP2::Map(row[i + M], row[i], ssum) :
OP2::Map(row[i + M], row[i], ssum);
row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
DType(0.0f);
if (Req == kAddTo) {
row[i] += igrad[my_row * M + i];
}
}
__syncthreads();
LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
}
}
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
const IType *length, index_t M, int axis,
Shape<ndim> sshape, Shape<ndim> stride,
const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
red::sum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
DType final_result;
for (index_t i = x; i < M; i += x_size) {
final_result =
negate ?
-OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
final_result = (i < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
}
}
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape, int axis,
const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
// Need to store both out and ograd, so M can be only half compared to
// forward pass.
const size_t max_opt_M = 20 * 1024 / DSize / 2;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
} else {
softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
}
}
#endif
} // namespace mxnet_op
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;
dmlc::optional<double> temperature;
dmlc::optional<int> dtype;
dmlc::optional<bool> use_length;
DMLC_DECLARE_PARAMETER(SoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1)
.describe("The axis along which to compute softmax.");
DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
.describe("Temperature parameter in softmax");
DMLC_DECLARE_FIELD(dtype)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(dmlc::optional<int>())
.describe("DType of the output in case this can't be inferred. "
"Defaults to the same as input's dtype if not defined (dtype=None).");
DMLC_DECLARE_FIELD(use_length)
.set_default(dmlc::optional<bool>(false))
.describe("Whether to use the length input as a mask over the data input.");
}
};
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.dtype.has_value() && param.dtype.value() != -1;
}
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.use_length.value();
}
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
return true;
} else {
std::vector<int> tmp = {in_attrs->at(0)};
return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
}
}
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(out_attrs->size(), 1U);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
if (param.use_length.value()) {
mxnet::TShape& dshape = in_attrs->at(0);
mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
int j = 0;
int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i != axis) {
tmp_shape[j++] = dshape[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
}
mxnet::ShapeVector tmp = {in_attrs->at(0)};
return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
mxnet::ShapeVector dgrad = {out_attrs->at(0)};
bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
mxnet::ShapeVector length = {in_attrs->at(2)};
mxnet::ShapeVector lgrad = {out_attrs->at(1)};
res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
return res;
} else {
return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
}
} else {
return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
}
}
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
int in_dtype = (*in_attrs)[1];
int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
if (softmax_use_length(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
}
    // (*out_attrs)[1] exists only when use_length is set; guard the access
    // to avoid reading past the end of the vector.
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (*in_attrs)[1] != -1 &&
           (!softmax_use_length(attrs) || (*out_attrs)[1] != -1);
} else {
CHECK_EQ(in_attrs->size(), 2U);
int out_dtype = (*in_attrs)[1];
TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
}
}
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
}
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
}
}
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
return softmax_use_length(attrs) ? 4 : 3;
}
return 2;
}
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::string>{"ograd", "data", "length", "output"};
} else {
return std::vector<std::string>{"ograd", "data", "output"};
}
} else {
return std::vector<std::string>{"ograd", "output"};
}
}
struct SoftmaxFGradient {
const char *op_name;
std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
const std::vector<nnvm::NodeEntry>& ograds) const {
if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
return ElemwiseGradUseInOut {op_name}(n, ograds);
} else {
return ElemwiseGradUseOut {op_name}(n, ograds);
}
}
};
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
int type = kInt32;
if (param.use_length.value()) {
CHECK(inputs.size() > 1)
<< "Mask needs to be provided when using softmax with use_length=True.";
type = inputs[1].type_flag_;
}
MXNET_INT_TYPE_SWITCH(type, IType, {
IType* mask_ptr = nullptr;
if (param.use_length.value()) {
mask_ptr = inputs[1].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
}
});
});
});
}
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (softmax_use_length(attrs)) {
MXNET_INT_TYPE_SWITCH(inputs[2].type_flag_, IType, {
if (req[1] != kNullOp) {
mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
}
});
}
if (req[0] == kNullOp) return;
const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
out_idx = softmax_use_length(attrs) ? 3 : out_idx;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(itype, IType, {
IType * length_ptr = nullptr;
if (softmax_use_length(attrs)) {
length_ptr = inputs[2].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
}
});
});
});
});
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
workgroup_size_option2.c | #include <stdio.h>
#include <omp.h>
int main() {
int num_threads = 0;
int N = 100000;
int a[N];
int b[N];
int c[N];
int i;
#pragma omp target map(from: num_threads)
{
num_threads = omp_get_num_threads();
}
printf("num_threads = %d\n", num_threads);
for (i=0; i<N; i++)
a[i]=0;
for (i=0; i<N; i++)
b[i]=i;
#pragma omp target parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
#pragma omp target teams
{
#pragma omp distribute parallel for
for (int j = 0; j< N; j++)
a[j]=b[j];
#pragma omp distribute parallel for
for (int j = 0; j< N; j++)
c[j]=b[j];
}
#pragma omp target teams distribute parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
#pragma omp target teams distribute parallel for thread_limit(64)
  for (int j = 0; j < N; j++)
    a[j] = b[j];
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] || c[i] != b[i]) {
rc++;
printf ("Wrong value: a[%d]=%d c[%d]=%d\n", i, a[i], i, c[i]);
}
if (!rc)
printf("Success\n");
return rc;
}
// Compiled with -fopenmp-gpu-threads-per-team=128
// Option specified workgroup size < default of 256 not honored at this point
/// CHECK: DEVID: 0 SGN:1 ConstWGSize:257 args: 1 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 257)
/// CHECK: DEVID: 0 SGN:2 ConstWGSize:256 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 256)
/// CHECK: DEVID: 0 SGN:3 ConstWGSize:257 args: 7 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 256)
/// CHECK: DEVID: 0 SGN:2 ConstWGSize:256 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 256)
/// CHECK: DEVID: 0 SGN:2 ConstWGSize:256 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 64)
|
whirlpool_fmt_plug.c | /* whirlpool cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_whirlpool_0;
extern struct fmt_main fmt_whirlpool_1;
extern struct fmt_main fmt_whirlpool;
#elif FMT_REGISTERS_H
john_register_one(&fmt_whirlpool_0);
john_register_one(&fmt_whirlpool_1);
john_register_one(&fmt_whirlpool);
#else
#include <string.h>
#include "arch.h"
#include "openssl_local_overrides.h"
#include "sph_whirlpool.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/opensslv.h>
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include "openssl/whrlpool.h"
#endif
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 256
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Whirlpool"
#define FORMAT_NAME ""
#define FORMAT_TAG "$whirlpool$"
#define TAG_LENGTH 11
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 64
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
static struct fmt_tests whirlpool_0_tests[] = {
{"B3E1AB6EAF640A34F784593F2074416ACCD3B8E62C620175FCA0997B1BA2347339AA0D79E754C308209EA36811DFA40C1C32F1A2B9004725D987D3635165D3C8", ""},
{NULL}
};
static struct fmt_tests whirlpool_1_tests[] = {
{"470F0409ABAA446E49667D4EBE12A14387CEDBD10DD17B8243CAD550A089DC0FEEA7AA40F6C2AAAB71C6EBD076E43C7CFCA0AD32567897DCB5969861049A0F5A", ""},
{NULL}
};
static struct fmt_tests whirlpool_tests[] = {
{"19FA61D75522A4669B44E39C1D2E1726C530232130D407F89AFEE0964997F7A73E83BE698B288FEBCF88E3E03C4F0757EA8964E59B63D93708B138CC42A66EB3", ""},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
if (strlen(p) != 128)
return 0;
return 1;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = strrchr(ciphertext, '$') + 1;
else
p = ciphertext;
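	/* Each pair of hex digits becomes one output byte, e.g. "B3" ->
	 * (atoi16['B'] << 4) | atoi16['3'] = (0xB << 4) | 0x3 = 0xB3. */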
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
static int crypt_0(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_whirlpool0_context ctx;
sph_whirlpool0_init(&ctx);
sph_whirlpool0(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool0_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_1(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_whirlpool1_context ctx;
sph_whirlpool1_init(&ctx);
sph_whirlpool1(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool1_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_2(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
WHIRLPOOL_CTX ctx;
WHIRLPOOL_Init(&ctx);
WHIRLPOOL_Update(&ctx, saved_key[index], strlen(saved_key[index]));
WHIRLPOOL_Final((unsigned char*)crypt_out[index], &ctx);
#else
sph_whirlpool_context ctx;
sph_whirlpool_init(&ctx);
sph_whirlpool(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool_close(&ctx, (unsigned char*)crypt_out[index]);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void whirlpool_set_key(char *key, int index)
{
int saved_key_length = strlen(key);
if (saved_key_length > PLAINTEXT_LENGTH)
saved_key_length = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_key_length);
saved_key[index][saved_key_length] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_whirlpool_0 = {
{
"whirlpool0",
"",
"WHIRLPOOL-0 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL
},
#endif
whirlpool_0_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL
},
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_0,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_whirlpool_1 = {
{
"whirlpool1",
"",
"WHIRLPOOL-1 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL
},
#endif
whirlpool_1_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL
},
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_1,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_whirlpool = {
{
"whirlpool",
"",
"WHIRLPOOL " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
whirlpool_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_2,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
batched_banded_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/basic/types.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/alignment/utils.h>
#include <nvbio/basic/cuda/work_queue.h>
#include <nvbio/basic/strided_iterator.h>
#include <nvbio/alignment/batched_stream.h>
namespace nvbio {
namespace aln {
///@addtogroup private
///@{
template <uint32 BAND_LEN, typename stream_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void batched_banded_alignment_score(const stream_type& stream, const uint32 work_id)
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename stream_type::context_type context_type;
typedef typename stream_type::strings_type strings_type;
// load the alignment context
context_type context;
if (stream.init_context( work_id, &context ) == true)
{
// compute the end of the current DP matrix window
const uint32 len = equal<typename aligner_type::algorithm_tag,TextBlockingTag>() ?
stream.text_length( work_id, &context ) :
stream.pattern_length( work_id, &context );
// load the strings to be aligned
strings_type strings;
stream.load_strings( work_id, 0, len, &context, &strings );
// score the current DP matrix window
banded_alignment_score<BAND_LEN>(
stream.aligner(),
strings.pattern,
strings.quals,
strings.text,
context.min_score,
context.sink );
}
// handle the output
stream.output( work_id, &context );
}
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
__global__ void
__launch_bounds__(BLOCKDIM,MINBLOCKS)
batched_banded_alignment_score_kernel(const stream_type stream)
{
const uint32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= stream.size())
return;
batched_banded_alignment_score<BAND_LEN>( stream, tid );
}
///@} // end of private group
///
/// HostThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
template <uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,HostThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename column_storage_type<aligner_type>::type cell_type;
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// enact the batch execution
//
template <uint32 BAND_LEN, typename stream_type>
void BatchedBandedAlignmentScore<BAND_LEN,stream_type,HostThreadScheduler>::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int tid = 0; tid < int( stream.size() ); ++tid)
batched_banded_alignment_score<BAND_LEN>( stream, tid );
}
///
/// DeviceThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceThreadBlockScheduler<BLOCKDIM,MINBLOCKS> >
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename column_storage_type<aligner_type>::type cell_type;
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// enact the batch execution
//
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
void BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceThreadBlockScheduler<BLOCKDIM,MINBLOCKS> >::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
const uint32 n_blocks = (stream.size() + BLOCKDIM-1) / BLOCKDIM;
batched_banded_alignment_score_kernel<BLOCKDIM,MINBLOCKS,BAND_LEN> <<<n_blocks, BLOCKDIM>>>( stream );
}
///
/// DeviceStagedThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
template <uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceStagedThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename checkpoint_storage_type<aligner_type>::type cell_type;
/// return the per-element column storage size
///
static uint32 column_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
const uint32 column_size = uint32( BAND_LEN * sizeof(cell_type) );
return align<4>( column_size );
}
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return column_storage( max_pattern_len, max_text_len ) * 1024;
}
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return column_storage( max_pattern_len, max_text_len ) * stream_size;
}
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL)
{
const uint64 min_temp_size = min_temp_storage(
stream.max_pattern_length(),
stream.max_text_length(),
stream.size() );
thrust::device_vector<uint8> temp_dvec;
if (temp == NULL)
{
temp_size = nvbio::max( min_temp_size, temp_size );
temp_dvec.resize( temp_size );
temp = nvbio::device_view( temp_dvec );
}
// set the queue capacity based on available memory
const uint32 max_pattern_len = stream.max_pattern_length();
const uint32 max_text_len = stream.max_text_length();
const uint32 queue_capacity = uint32( temp_size / column_storage( max_pattern_len, max_text_len ) );
m_work_queue.set_capacity( queue_capacity );
// prepare the work stream
ScoreStream<stream_type> score_stream(
stream, // the alignments stream
temp, // band storage
NULL, // no need for checkpoints
queue_capacity ); // the queue capacity, used for the memory striding
// consume the work stream
m_work_queue.consume( score_stream );
}
private:
cuda::WorkQueue<
cuda::PersistentThreadsQueueTag,
BandedScoreUnit<BAND_LEN, stream_type>,
BLOCKDIM> m_work_queue;
};
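//
// Worked example of the queue-capacity arithmetic above (numbers illustrative only):
// with BAND_LEN = 31 and a 4-byte cell_type, column_storage() = align<4>( 31*4 ) = 124
// bytes per queue slot, so a 64 MB temporary buffer yields a queue capacity of roughly
// 64*1024*1024 / 124 ~= 541000 in-flight alignments; the persistent-threads work queue
// then streams the remaining jobs through those slots as earlier ones complete.
//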
// --- Banded Traceback --------------------------------------------------------------------------------------------------------- //
///@addtogroup private
///@{
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void batched_banded_alignment_traceback(stream_type& stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride, const uint32 work_id, const uint32 thread_id)
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename stream_type::context_type context_type;
typedef typename stream_type::strings_type strings_type;
// load the alignment context
context_type context;
if (stream.init_context( work_id, &context ) == false)
{
// handle the output
stream.output( work_id, &context );
return;
}
// compute the end of the current DP matrix window
const uint32 len = equal<typename aligner_type::algorithm_tag,PatternBlockingTag>() ?
stream.pattern_length( work_id, &context ) :
stream.text_length( work_id, &context );
// load the strings to be aligned
strings_type strings;
stream.load_strings( work_id, 0, len, &context, &strings );
// fetch the proper checkpoint storage
typedef strided_iterator<cell_type*> checkpoint_type;
checkpoint_type checkpoint = checkpoint_type( checkpoints + thread_id, stride );
// fetch the proper submatrix storage
typedef strided_iterator<uint32*> submatrix_storage_type;
submatrix_storage_type submatrix_storage = submatrix_storage_type( submatrices + thread_id, stride );
const uint32 BITS = direction_vector_traits<aligner_type>::BITS;
PackedStream<submatrix_storage_type,uint8,BITS,false> submatrix( submatrix_storage );
// score the current DP matrix window
context.alignment = banded_alignment_traceback<BAND_LEN, CHECKPOINTS>(
stream.aligner(),
strings.pattern,
strings.quals,
strings.text,
context.min_score,
context.backtracer,
checkpoint,
submatrix );
// handle the output
stream.output( work_id, &context );
}
template <uint32 BLOCKDIM, uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
__global__ void batched_banded_alignment_traceback_kernel(stream_type stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride)
{
const uint32 tid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (tid >= stream.size())
return;
batched_banded_alignment_traceback<BAND_LEN, CHECKPOINTS>( stream, checkpoints, submatrices, stride, tid, tid );
}
template <uint32 BLOCKDIM, uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
__global__ void persistent_banded_batched_alignment_traceback_kernel(stream_type stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride)
{
const uint32 grid_threads = gridDim.x * BLOCKDIM;
const uint32 thread_id = threadIdx.x + blockIdx.x*BLOCKDIM;
const uint32 stream_end = stream.size();
// let this CTA fetch all tiles at a grid-threads stride, starting from blockIdx.x*BLOCKDIM
for (uint32 stream_begin = 0; stream_begin < stream_end; stream_begin += grid_threads)
{
const uint32 work_id = thread_id + stream_begin;
if (work_id < stream_end)
batched_banded_alignment_traceback<BAND_LEN, CHECKPOINTS>( stream, checkpoints, submatrices, stride, work_id, thread_id );
}
}
///@} // end of private group
///
/// DeviceThreadScheduler specialization of BatchedBandedAlignmentTraceback.
///
/// \tparam stream_type the stream of alignment jobs
///
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
struct BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS, stream_type,DeviceThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename column_storage_type<aligner_type>::type cell_type;
/// return the per-element checkpoint storage size
///
static uint32 checkpoint_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
return align<4>( uint32( BAND_LEN * ((max_pattern_len + CHECKPOINTS-1) / CHECKPOINTS) * sizeof(cell_type) ) );
}
/// return the per-element submatrix storage size
///
static uint32 submatrix_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
typedef typename stream_type::aligner_type aligner_type;
const uint32 BITS = direction_vector_traits<aligner_type>::BITS;
const uint32 ELEMENTS_PER_WORD = 32 / BITS;
return ((BAND_LEN * CHECKPOINTS + ELEMENTS_PER_WORD-1) / ELEMENTS_PER_WORD) * sizeof(uint32);
}
/// return the per-element storage size
///
static uint32 element_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
return checkpoint_storage( max_pattern_len, max_text_len ) +
submatrix_storage( max_pattern_len, max_text_len );
}
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size);
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size);
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// return the minimum number of bytes required by the algorithm
//
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
uint64 BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS, stream_type,DeviceThreadScheduler>::min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return element_storage( max_pattern_len, max_text_len ) * 1024;
}
// return the maximum number of bytes required by the algorithm
//
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
uint64 BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS,stream_type,DeviceThreadScheduler>::max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return element_storage( max_pattern_len, max_text_len ) * stream_size;
}
// enact the batch execution
//
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
void BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS,stream_type,DeviceThreadScheduler>::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
const uint64 min_temp_size = min_temp_storage(
stream.max_pattern_length(),
stream.max_text_length(),
stream.size() );
thrust::device_vector<uint8> temp_dvec;
if (temp_size == 0u)
{
temp_dvec.resize( min_temp_size );
temp = nvbio::device_view( temp_dvec );
temp_size = min_temp_size;
}
// set the queue capacity based on available memory
const uint32 max_pattern_len = stream.max_pattern_length();
const uint32 max_text_len = stream.max_text_length();
const uint32 queue_capacity = uint32( temp_size / element_storage( max_pattern_len, max_text_len ) );
const uint64 checkpoints_size = checkpoint_storage( max_pattern_len, max_text_len );
if (queue_capacity >= stream.size())
{
const uint32 n_blocks = (stream.size() + BLOCKDIM-1) / BLOCKDIM;
cell_type* checkpoints = (cell_type*)(temp);
uint32* submatrices = (uint32*) (temp + checkpoints_size * stream.size());
batched_banded_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS> <<<n_blocks, BLOCKDIM>>>(
stream,
checkpoints,
submatrices,
stream.size() );
}
else
{
// compute the number of blocks we are going to launch
const uint32 n_blocks = nvbio::max( nvbio::min(
(uint32)cuda::max_active_blocks( persistent_banded_batched_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS,stream_type,cell_type>, BLOCKDIM, 0u ),
queue_capacity / BLOCKDIM ), 1u );
cell_type* checkpoints = (cell_type*)(temp);
uint32* submatrices = (uint32*) (temp + checkpoints_size * queue_capacity);
persistent_banded_batched_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS> <<<n_blocks, BLOCKDIM>>>(
stream,
checkpoints,
submatrices,
queue_capacity );
}
}
///@} // end of BatchAlignment group
///@} // end of the Alignment group
} // namespace aln
} // namespace nvbio
|
depend-4.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
foo (int *p, int (*q)[10], int r[10], int s[10][10])
{
int a[10], b[10][10];
#pragma omp task depend (inout: p[-1:2])
;
#pragma omp task depend (inout: q[-1:2][2:4])
;
#pragma omp task depend (inout: q[-1:2][-2:4]) /* { dg-error "negative low bound in array section in" } */
;
#pragma omp task depend (inout: r[-1:2])
;
#pragma omp task depend (inout: s[-1:2][2:4])
;
#pragma omp task depend (inout: s[-1:2][-2:4]) /* { dg-error "negative low bound in array section in" } */
;
#pragma omp task depend (inout: a[-1:2]) /* { dg-error "negative low bound in array section in" } */
;
#pragma omp task depend (inout: b[-1:2][2:4]) /* { dg-error "negative low bound in array section in" } */
;
#pragma omp task depend (inout: b[1:2][-2:4]) /* { dg-error "negative low bound in array section in" } */
;
#pragma omp task depend (inout: p[2:-3]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: q[2:-3][:]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: q[2:3][0:-1]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: r[2:-5]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: s[2:-5][:]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: s[2:5][0:-4]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: a[2:-5]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: b[2:-5][0:10]) /* { dg-error "negative length in array section in" } */
;
#pragma omp task depend (inout: b[2:5][0:-4]) /* { dg-error "negative length in array section in" } */
;
}
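/* Summary: an array section is written [low-bound : length].  A negative low bound
   is accepted only in a dimension whose base is (or decays to) a pointer, as in the
   first dimension of p, q, r and s above; in a genuine array dimension (a, b, and
   the second dimension of q or s) it is rejected, and a negative length is always
   an error. */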
|
parallel.h | #pragma once
namespace pbbs {
//***************************************
// All the pbbs library uses only four functions for
// accessing parallelism.
// These can be implemented on top of any scheduler.
//***************************************
// number of threads available from OS
int num_workers();
// id of the running thread, numbered in [0 ... num_workers())
int worker_id();
void set_num_workers(int n);
#ifdef SAGE
static int numanode();
#endif
// the granularity of a simple loop (e.g. adding one to each element
// of an array) to reasonably hide cost of scheduler
// #define PAR_GRANULARITY 2000
// parallel loop from start (inclusive) to end (exclusive) running
// function f.
// f should map long to void.
// granularity is the number of iterations to run sequentially
// if 0 (default) then the scheduler will decide
// conservative uses a safer scheduler
template <typename F>
static void parallel_for(long start, long end, F f, long granularity = 0,
bool conservative = false);
// runs the thunks left and right in parallel.
// both left and right should map void to void
// conservative uses a safer scheduler
template <typename Lf, typename Rf>
static void par_do(Lf left, Rf right, bool conservative = false);
template <typename A, typename Af, typename Df, typename F>
static void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity = 0,
bool conservative = false);
} // namespace pbbs
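//***************************************
// Usage sketch (illustrative, not part of the library): a data-parallel loop and a
// recursive fork-join sum built on the two primitives declared above.
//
//   #include "parallel.h"
//
//   void scale(double* v, long n, double c) {
//     // granularity 0: let the scheduler decide
//     pbbs::parallel_for(0, n, [&](long i) { v[i] *= c; });
//   }
//
//   double sum(const double* v, long n) {
//     if (n < 1000) {              // sequential base case
//       double s = 0.0;
//       for (long i = 0; i < n; i++) s += v[i];
//       return s;
//     }
//     double l, r;
//     pbbs::par_do([&] { l = sum(v, n / 2); },
//                  [&] { r = sum(v + n / 2, n - n / 2); });
//     return l + r;
//   }
//***************************************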
//***************************************
// cilkplus
#if defined(CILK)
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#include <cilk/reducer.h>
#include <iostream>
#include <sstream>
#define PAR_GRANULARITY 2000
namespace pbbs {
template <typename F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
if (granularity == 0)
cilk_for(long i = start; i < end; i++) f(i);
else if ((end - start) <= granularity)
for (long i = start; i < end; i++) f(i);
else {
long n = end - start;
long mid = (start + (9 * (n + 1)) / 16);
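      // note: the split point is biased to ~9/16 rather than 1/2 (presumably so the
      // spawned left half is slightly larger than the continuation); both halves recurse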
cilk_spawn parallel_for(start, mid, f, granularity);
parallel_for(mid, end, f, granularity);
cilk_sync;
}
}
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
bool conservative) {
_Pragma("cilk grainsize = 1") cilk_for(long i = start; i < end; i++) f(i);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
cilk_spawn right();
left();
cilk_sync;
}
template <typename A>
class alloc_holder {
struct Monoid : cilk::monoid_base<A> {
static void reduce(A *left, A *right) {}
};
public:
cilk::reducer<Monoid> imp_;
alloc_holder() : imp_() {}
};
// TODO try parallel_for_1
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
alloc_holder<A> alloc;
parallel_for_1(start, end,
[&](size_t i) {
init_alloc(&alloc.imp_.view());
f(i, &(alloc.imp_.view()));
// finish_alloc(&(alloc.imp_.view()));
},
granularity, conservative);
}
inline int num_workers() { return __cilkrts_get_nworkers(); }
inline int worker_id() { return __cilkrts_get_worker_number(); }
#ifdef SAGE
inline int numanode() {
std::cout << "numanode() only supported with homegrown scheduler" << std::endl;
exit(-1);
return 1;
}
#endif
} // namespace pbbs
// openmp
#elif defined(OPENMP)
#include <omp.h>
#include <stddef.h>
#define PAR_GRANULARITY 200000
namespace pbbs {
extern bool in_par_do;
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
_Pragma("omp parallel for") for (long i = start; i < end; i++) f(i);
}
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
bool conservative) {
#pragma omp for schedule(dynamic, 1) nowait
for (long i = start; i < end; i++) f(i);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  if (!in_par_do) {
    in_par_do = true; // at top level start up tasking
#pragma omp parallel
#pragma omp single
    { // braces so both tasks and the taskwait run once, inside the single region
#pragma omp task
      left();
#pragma omp task
      right();
#pragma omp taskwait
    }
    in_par_do = false;
  } else { // already started: spawn nested tasks and wait for both before returning
#pragma omp task
    left();
#pragma omp task
    right();
#pragma omp taskwait
  }
}
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
A* alloc = nullptr;
#pragma omp parallel private(alloc)
{
alloc = new A();
init_alloc(alloc);
parallel_for_1(start, end, [&](size_t i) { f(i, alloc); }, granularity,
conservative);
//#pragma omp for schedule(dynamic, 1) nowait
// for(long i=start; i<end; i++) f(i, alloc);
finish_alloc(alloc);
}
}
inline int num_workers() { return omp_get_max_threads(); }
inline int worker_id() { return omp_get_thread_num(); }
#ifdef SAGE
inline int numanode() {
std::cout << "numanode() only supported with homegrown scheduler" << std::endl;
exit(-1);
return 1;
}
#endif
} // namespace pbbs
// Guy's scheduler (ABP)
#elif defined(HOMEGROWN)
#include "scheduler.h"
#define PAR_GRANULARITY 512
namespace pbbs {
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
pbbs::global_scheduler.parfor(start, end, f, granularity, conservative);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
return pbbs::global_scheduler.pardo(left, right, conservative);
}
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
parallel_for(start, end,
[&](long i) {
static thread_local A* alloc = new A();
init_alloc(alloc);
f(i, alloc);
},
granularity, conservative);
// finish_alloc(alloc);
}
inline int num_workers() { return pbbs::global_scheduler.num_workers(); }
inline int worker_id() { return pbbs::global_scheduler.worker_id(); }
#ifdef SAGE
inline int numanode() { return pbbs::global_scheduler.numanode(); }
#endif
} // namespace pbbs
// c++
#else
#define PAR_GRANULARITY 1000
namespace pbbs {
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
for (long i = start; i < end; i++) {
f(i);
}
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
left();
right();
}
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
A* alloc = new A();
init_alloc(alloc);
for (long i = start; i < end; i++) {
f(i, alloc);
}
finish_alloc(alloc);
}
inline int num_workers() { return 1; }
inline int worker_id() { return 0; }
} // namespace pbbs
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 4) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
  } else {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(16*t3+Nx+3,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),8*t4+6);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8] =
    coef[0][-4*t5+t6][-4*t5+t7][-4*t5+t8]  *  A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
  + coef[1][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6-1][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+1][-4*t5+t7][-4*t5+t8])
  + coef[2][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7-1][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+1][-4*t5+t8])
  + coef[3][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-1] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+1])
  + coef[4][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6-2][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+2][-4*t5+t7][-4*t5+t8])
  + coef[5][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7-2][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+2][-4*t5+t8])
  + coef[6][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-2] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+2])
  + coef[7][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6-3][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+3][-4*t5+t7][-4*t5+t8])
  + coef[8][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7-3][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+3][-4*t5+t8])
  + coef[9][-4*t5+t6][-4*t5+t7][-4*t5+t8]  * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-3] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+3])
  + coef[10][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6-4][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+4][-4*t5+t7][-4*t5+t8])
  + coef[11][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7-4][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+4][-4*t5+t8])
  + coef[12][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-4] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+4]);
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_unop__ceil_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp32_fp32)
// op(A') function: GB (_unop_tran__ceil_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = ceilf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceilf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = ceilf (z) ; \
}
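// For example, GB_CAST_OP (p, p) expands (comments elided) to:
// { float aij = Ax [p] ; float z = aij ; Cx [p] = ceilf (z) ; }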
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ceil_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = ceilf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = ceilf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ceil_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_teams.5.c | /*
* @@name: teams.5c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
extern void init(float *, float *, int);
extern void output(float *, int);
void vec_mult(float *p, float *v1, float *v2, int N)
{
int i;
init(v1, v2, N);
#pragma omp target teams map(to: v1[0:N], v2[:N]) map(from: p[0:N])
#pragma omp distribute simd
for (i=0; i<N; i++)
p[i] = v1[i] * v2[i];
output(p, N);
}
|
main.c | #include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include "lcg.h"
#include "utilities.h"
#include "render.h"
#define NUM_OF_THREADS 4
#define NUM_OF_ANTS_PER_THREAD (NUM_OF_CITIES/20)
#define NUM_OF_ANTS (NUM_OF_THREADS*NUM_OF_ANTS_PER_THREAD)
#define ALPHA 1
#define BETA 6
#define RHO 0.7
#define T0 (1.0/(2*X_MAX+2*Y_MAX))
void citiesInitialization(LCG *lcg,city *cities);
void constructGraph(city *cities,double **adjMatrix);
void heuristicValuesInitialization(double **adjMatrix, double **heuristicValues);
void pherTrailsInitialization(double **pherTrails);
int calcNextCity(LCG *lcg,int *memory, int noOfCitiesVisited, double **routTable);
void depositPheromone(int *memory,double dt,double **pherTrails);
void pheromoneEvaporation(double** pherTrails);
void updateRoutTable(double **pherTrails,double **heuristicValues,double **routTable);
int findBestAnt(double *pathDistance);
double calcDistanceSquared(city c1,city c2);
double calcDistance(city c1,city c2);
void printPath(int *path);
int main(int argc, char* argv[]){
city cities[NUM_OF_CITIES];
double **adjMatrix=doubleArray2DInHeap(NUM_OF_CITIES,NUM_OF_CITIES);
double **pherTrails=doubleArray2DInHeap(NUM_OF_CITIES,NUM_OF_CITIES);
double **heuristicValues=doubleArray2DInHeap(NUM_OF_CITIES,NUM_OF_CITIES);
double **routTable=doubleArray2DInHeap(NUM_OF_CITIES,NUM_OF_CITIES);
int **memory=intArray2DInHeap(NUM_OF_ANTS,NUM_OF_CITIES);
double pathDistance[NUM_OF_ANTS];
double dt;
int ant;
int noOfCitiesVisited;
int nextCity;
int iteration=0;
int bestAnt;
int threadID;
char message[100];
LCG lcg=(LCG) {44485709377909ULL,11863279ULL,281474976710656ULL,time(NULL)};
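    //LCG fields are assumed from the initializer to be {multiplier, increment, modulus, seed};
    //the modulus 281474976710656 is 2^48, i.e. a 48-bit linear congruential generator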
//Core logic=======================================================================================
citiesInitialization(&lcg,cities);
constructGraph(cities,adjMatrix);
heuristicValuesInitialization(adjMatrix,heuristicValues);
pherTrailsInitialization(pherTrails);
updateRoutTable(pherTrails,heuristicValues,routTable);
init();
// In the first iteration, build the city grid without any connections
buildCityGrid(cities, NUM_OF_CITIES);
int quit = 0;
SDL_Event e;
#pragma omp parallel num_threads(NUM_OF_THREADS) private(threadID,dt,ant,noOfCitiesVisited,nextCity,lcg)
{
threadID=omp_get_thread_num();
        LCG lcg=(LCG) {44485709377909ULL,11863279ULL,281474976710656ULL,time(NULL)+threadID}; //Each thread gets a different seed
while(!quit){
//Each thread spawns NUM_OF_ANTS_PER_THREAD ants
for(ant=0; ant<NUM_OF_ANTS_PER_THREAD; ant++){
memory[threadID*NUM_OF_ANTS_PER_THREAD+ant][0]=0; //All ants start from cities[0]
pathDistance[threadID*NUM_OF_ANTS_PER_THREAD+ant]=0; //Reset pathDistance of this ant
//Ant finds a hamiltonian cycle
for(noOfCitiesVisited=1; noOfCitiesVisited<NUM_OF_CITIES; noOfCitiesVisited++){
nextCity=calcNextCity(&lcg,memory[threadID*NUM_OF_ANTS_PER_THREAD+ant],noOfCitiesVisited,routTable);
memory[threadID*NUM_OF_ANTS_PER_THREAD+ant][noOfCitiesVisited]=nextCity;
pathDistance[threadID*NUM_OF_ANTS_PER_THREAD+ant]+=adjMatrix[memory[threadID*NUM_OF_ANTS_PER_THREAD+ant][noOfCitiesVisited-1]][nextCity];
}
//Add the distance from the last city back to the starting one
pathDistance[threadID*NUM_OF_ANTS_PER_THREAD+ant]+=adjMatrix[nextCity][0];
dt=1.0/pathDistance[threadID*NUM_OF_ANTS_PER_THREAD+ant]; //Although each ant calls depositPheromone as soon as it finds a hamiltonian cycle, the changes in the pherTrails
depositPheromone(memory[threadID*NUM_OF_ANTS_PER_THREAD+ant],dt,pherTrails); //it causes are invisible to the ants of this iteration since ants only utilize routTable for routing decisions (which
} //is updated with updateRoutTable())
#pragma omp barrier
#pragma omp single
{
pheromoneEvaporation(pherTrails);
updateRoutTable(pherTrails,heuristicValues,routTable);
bestAnt=findBestAnt(pathDistance);
sprintf(message,"Iteration %d: Best Ant=%d, Minimum Path Distance = %.2f\n",iteration,bestAnt,pathDistance[bestAnt]);
printf("%s",message);
clearWindow();
updateIterationText(message);
buildCityGrid(cities, NUM_OF_CITIES);
connectCities(cities, memory, bestAnt);
while (SDL_PollEvent(&e) != 0)
{
if (e.type == SDL_QUIT)
{
quit = 1;
}
else if (e.type == SDL_KEYDOWN && e.key.keysym.sym == SDLK_ESCAPE)
{
quit = 1;
}
}
iteration++;
}
}
}
//=================================================================================================
shutDown();
return 0;
}
void citiesInitialization(LCG *lcg,city *cities){
int a;
city *c;
for(a=0; a<NUM_OF_CITIES; a++){
c=&cities[a];
c->x=randDBLBetween(lcg,0.0, (double) X_MAX);
c->y=randDBLBetween(lcg,0.0, (double) Y_MAX);
}
}
void constructGraph(city *cities, double **adjMatrix){
int a;
int b;
for(a=0; a<NUM_OF_CITIES; a++){
for(b=0; b<NUM_OF_CITIES; b++){
adjMatrix[a][b]=calcDistance(cities[a],cities[b]);
}
}
}
void heuristicValuesInitialization(double **adjMatrix, double **heuristicValues){
int a,b;
for(a=0; a<NUM_OF_CITIES; a++){
for(b=0; b<NUM_OF_CITIES; b++){
if(a==b){
heuristicValues[a][a]=0;
}else{
heuristicValues[a][b]=1/adjMatrix[a][b];
}
}
}
}
void pherTrailsInitialization(double **pherTrails){
setTableD(pherTrails,NUM_OF_CITIES,NUM_OF_CITIES,T0);
for(int a=0; a<NUM_OF_CITIES; a++){
pherTrails[a][a]=0;
}
}
int calcNextCity(LCG *lcg,int *memory, int noOfCitiesVisited, double **routTable){
int curCity=memory[noOfCitiesVisited-1];
int possibleToTravelTo[NUM_OF_CITIES];
double denominator;
double probabilities[NUM_OF_CITIES];
double limits[NUM_OF_CITIES];
int a;
double randomNum;
//Find the possible next cities
setArrayI(possibleToTravelTo,NUM_OF_CITIES,1); //You can go to every city...
for(a=0; a<noOfCitiesVisited; a++){ //No need to check the whole memory
possibleToTravelTo[memory[a]]=0; //...except those already visited
}
//Calculate denominator
denominator=0;
for(a=0; a<NUM_OF_CITIES; a++){
if(possibleToTravelTo[a]){
denominator+=routTable[curCity][a];
}
}
//Calculate probabilities
for(a=0; a<NUM_OF_CITIES; a++){
if(possibleToTravelTo[a]){
probabilities[a]=routTable[curCity][a]/denominator;
}else{
probabilities[a]=0.0;
}
}
//Calculate limits
limits[0]=0;
for(a=1; a<NUM_OF_CITIES; a++){
limits[a]=limits[a-1]+probabilities[a-1];
}
//Calculate nextCity
randomNum=randDBLBetween(lcg,0.0,1.0);
for(a=1; a<NUM_OF_CITIES; a++){
//Covers every case except when limits[a-1]=0,randomNum=0,limits[a]=X
if( limits[a-1]<randomNum && randomNum<=limits[a] ){ //(,]
break;
        //Covers every case except when limits[a-1]=X,randomNum=1,limits[a]=1
}else if( limits[a-1]<=randomNum && randomNum<limits[a] ){ //[,)
break;
}
}
return --a;
}
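//Worked example of the roulette-wheel selection above: with 3 reachable cities and
//probabilities {0.5, 0.3, 0.2}, limits = {0, 0.5, 0.8}; a random number of 0.65 lies
//in (limits[1], limits[2]] so the loop breaks at a=2 and city a-1 = 1 is returned,
//i.e. each city is picked with probability proportional to its routing-table weight.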
void depositPheromone(int *memory,double dt,double **pherTrails){
int a;
for(a=0; a<=NUM_OF_CITIES-2; a++){
pherTrails[memory[a]][memory[a+1]]+=dt;
}
pherTrails[memory[a]][0]+=dt;
}
void pheromoneEvaporation(double** pherTrails){
int a,b;
for(a=0; a<NUM_OF_CITIES; a++){
for(b=0; b<NUM_OF_CITIES; b++){
pherTrails[a][b]*=(1-RHO);
}
}
}
void updateRoutTable(double **pherTrails,double **heuristicValues,double **routTable){
int a,b;
double numerator;
double denominator;
for(a=0; a<NUM_OF_CITIES; a++){
//Calculate denominator
denominator=0.0;
for(b=0; b<NUM_OF_CITIES; b++){
denominator+=pow(pherTrails[a][b],ALPHA)*pow(heuristicValues[a][b],BETA);
}
//Calculate elements
for(b=0; b<NUM_OF_CITIES; b++){
numerator=pow(pherTrails[a][b],ALPHA)*pow(heuristicValues[a][b],BETA);
routTable[a][b]=numerator/denominator;
}
}
}
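//The routing table implements the standard Ant System transition weights:
//  routTable[a][b] = tau_ab^ALPHA * eta_ab^BETA / sum_c (tau_ac^ALPHA * eta_ac^BETA)
//where tau is the pheromone trail and eta = 1/distance the heuristic value;
//calcNextCity then renormalizes these weights over the unvisited cities only.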
int findBestAnt(double *pathDistance){
int ant;
int bestAnt;
double bestPath;
double candPath;
bestAnt=0;
bestPath=pathDistance[0];
for(ant=1; ant<NUM_OF_ANTS; ant++){
if( (candPath=pathDistance[ant])<bestPath){
bestAnt=ant;
bestPath=candPath;
}
}
return bestAnt;
}
double calcDistanceSquared(city c1,city c2){
double x_dif=c1.x-c2.x;
double y_dif=c1.y-c2.y;
return x_dif*x_dif+y_dif*y_dif;
}
double calcDistance(city c1,city c2){
return sqrt(calcDistanceSquared(c1,c2));
}
void printPath(int *path){
int a=0;
printf("%d",path[0]);
for(a=1; a<NUM_OF_CITIES; a++){
printf("->%d",path[a]);
}
} |
omp-for-ordered.c | #include <stdio.h>
#include <omp.h>
int main() {
#pragma omp parallel
{
int i;
#pragma omp single
printf("\nloop0, static, chunk 1, no ordered\n");
#pragma omp for schedule(static, 1)
for(i = 0; i < 10 ; i++) {
printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
}
#pragma omp single
printf("loop1, no schedule\n");
#pragma omp for ordered
for(i = 0; i < 10 ; i++) {
#pragma omp ordered
{
printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
}
}
#pragma omp single
printf("\nloop2, static\n");
#pragma omp for schedule(static) ordered
for(i = 0; i < 10 ; i++) {
#pragma omp ordered
{
printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
}
}
#pragma omp single
printf("\nloop3, static, chunk 1\n");
#pragma omp for schedule(static, 1) ordered
for(i = 0; i < 10 ; i++) {
#pragma omp ordered
{
printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
}
}
#pragma omp single
printf("\nloop4, static, chunk 2\n");
#pragma omp for schedule(static, 2) ordered
for(i = 0; i < 10 ; i++) {
#pragma omp ordered
{
printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
}
}
}
return 0;
}
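/* Note: inside a loop with the ordered clause, the block under "#pragma omp ordered"
   executes in iteration order (i = 0..9) no matter which thread owns each chunk, so
   loops 1-4 print i in ascending order while loop0 may print in any interleaving;
   the schedule only changes which thread executes (and prints) each iteration. */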
|
LRBreakup.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_LRBREAKUP_H
#define QMCPLUSPLUS_LRBREAKUP_H
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "LongRange/KContainer.h"
#include "Numerics/OhmmsBlas.h"
#include <cassert>
namespace qmcplusplus
{
template<class BreakupBasis>
struct LRBreakup
{
DECLARE_COULOMB_TYPES
//Typedef for the lattice-type. We don't need the full particle-set.
typedef ParticleSet::ParticleLayout_t ParticleLayout_t;
//We use an internal k-list with degeneracies to do the breakup.
//We do this because the number of vectors is much larger than we'd
//use elsewhere.
void AddKToList(mRealType k, mRealType degeneracy = 1.0);
///The basis to be used for breakup.
BreakupBasis& Basis;
/// For each k, KList[k][0] = |k| and KList[k][1] = degeneracy
std::vector<TinyVector<mRealType, 2>> KList;
/** setup KList
* @param kc k-space cutoff for long-range sums
* @param kcont k at which approximate (spherical shell) degeneracies are used.
* @param kmax largest k used for performing the breakup
* @return the maximum kshell for the given kc
*/
int SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax);
//Fk is FT of F_full(r) up to kmax
//adjust is used for constraining values in the breakup
/* REPLACED SO WE CAN USE TYPES OTHER THAN STL VECTOR.
mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t,
const std::vector<bool> &adjust);
mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t);
*/
mRealType DoBreakup(mRealType* Fk, mRealType* t, mRealType* adjust);
mRealType DoGradBreakup(mRealType* Fk, mRealType* t, mRealType* adjust);
mRealType DoStrainBreakup(mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* adjust);
void DoAllBreakup(mRealType* chisqr,
mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* gt,
mRealType* dt,
mRealType* adjust);
mRealType DoBreakup(mRealType* Fk, mRealType* t)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int numElem = Basis.NumBasisElem(); //t.size();
A.resize(numElem, numElem);
b.resize(numElem, 0.0);
cnk.resize(numElem, KList.size());
// Fill in cnk.
// app_log() << "Check OMP size : numElem, KList.size : " << numElem << " , " << KList.size() << std::endl;
#pragma omp parallel for shared(cnk)
for (int n = 0; n < numElem; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
// Now, fill in A and b
A = 0.0;
for (int l = 0; l < numElem; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < numElem; n++)
A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//////////////////////////
//Do the SVD:
// Matrix<mRealType> U(numElem, numElem), V(numElem, numElem);
// std::vector<mRealType> S(numElem), Sinv(numElem);
//////////////////////////
// SVdecomp(A, U, S, V);
//////////////////////////
int M = A.rows();
int N = A.cols();
Matrix<mRealType> Atrans(N, M);
Matrix<mRealType> U, V;
U.resize(std::min(M, N), M);
V.resize(N, std::min(M, N));
std::vector<mRealType> S, Sinv;
S.resize(std::min(N, M));
//Do the transpose
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
Atrans(j, i) = A(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = M;
int LDU = M;
int LDVT = std::min(M, N);
int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
///////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < S.size(); i++)
Smax = std::max(S[i], Smax);
Sinv.resize(S.size());
for (int i = 0; i < S.size(); i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
for (int i = 0; i < numElem; i++)
t[i] = 0.0;
// Compute t_n, removing singular values
for (int i = 0; i < numElem; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < numElem; j++)
coef += U(j, i) * b[j];
coef *= Sinv[i];
for (int k = 0; k < numElem; k++)
t[k] += coef * V(k, i);
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < numElem; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][1] * Yk * Yk;
}
return (chi2);
}
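  // In matrix form, the breakup above minimizes
  //   chi^2 = sum_k deg(k) * ( F(k) - sum_n t_n c_n(k) )^2
  // whose normal equations are A t = b with
  //   A(l,n) = sum_k deg(k) c_l(k) c_n(k),  b(l) = sum_k deg(k) F(k) c_l(k),
  // and the SVD solve computes the pseudo-inverse solution t = V S^+ U^T b,
  // zeroing singular values below tolerance*Smax for numerical stability.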
//The constructor. Call the constructor of basis...
//set up the basis parameters too.
LRBreakup(BreakupBasis& bref) : Basis(bref)
{ /*Do Nothing*/
}
mRealType DoGradBreakup(mRealType* Fk, mRealType* t)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int numElem = Basis.NumBasisElem(); //t.size();
A.resize(numElem, numElem);
b.resize(numElem, 0.0);
cnk.resize(numElem, KList.size());
// Fill in cnk.
for (int n = 0; n < numElem; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
// Now, fill in A and b
A = 0.0;
for (int l = 0; l < numElem; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < numElem; n++)
A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//////////////////////////
//Do the SVD:
// Matrix<mRealType> U(numElem, numElem), V(numElem, numElem);
// std::vector<mRealType> S(numElem), Sinv(numElem);
//////////////////////////
// SVdecomp(A, U, S, V);
//////////////////////////
int M = A.rows();
int N = A.cols();
Matrix<mRealType> Atrans(N, M);
Matrix<mRealType> U, V;
U.resize(std::min(M, N), M);
V.resize(N, std::min(M, N));
std::vector<mRealType> S, Sinv;
S.resize(std::min(N, M));
//Do the transpose
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
Atrans(j, i) = A(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = M;
int LDU = M;
int LDVT = std::min(M, N);
int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
///////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < S.size(); i++)
Smax = std::max(S[i], Smax);
Sinv.resize(S.size());
for (int i = 0; i < S.size(); i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
for (int i = 0; i < numElem; i++)
t[i] = 0.0;
// Compute t_n, removing singular values
for (int i = 0; i < numElem; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < numElem; j++)
coef += U(j, i) * b[j];
coef *= Sinv[i];
for (int k = 0; k < numElem; k++)
t[k] += coef * V(k, i);
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
Yk = Fk[ki];
for (int n = 0; n < numElem; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += k2 * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
};
template<class BreakupBasis>
void LRBreakup<BreakupBasis>::AddKToList(mRealType k, mRealType degeneracy /* =1.0 */)
{
//Search for this k already in list
int ki = 0;
while ((ki < KList.size()) && (std::abs(k - KList[ki][0]) > 1.0e-12))
ki++;
if (ki == KList.size())
{
TinyVector<mRealType, 2> temp(k, degeneracy);
KList.push_back(temp);
}
else
KList[ki][1] += degeneracy;
}
template<class BreakupBasis>
int LRBreakup<BreakupBasis>::SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax)
{
//Add low |k| ( < kcont) k-points with exact degeneracy
KContainer kexact;
kexact.UpdateKLists(Basis.get_Lattice(), kcont);
bool findK = true;
mRealType kc2 = kc * kc;
//use at least one shell
size_t ks = 0;
kc2 = std::max(kc2, static_cast<mRealType>(kexact.ksq[kexact.kshell[ks]]));
while (findK)
{
if (kexact.ksq[kexact.kshell[ks]] > kc2)
findK = false;
else
ks++;
}
size_t maxkshell = ks;
size_t numk = kexact.numk - kexact.kshell[ks];
for (; ks < kexact.kshell.size() - 1; ks++)
AddKToList(std::sqrt(kexact.ksq[kexact.kshell[ks]]), kexact.kshell[ks + 1] - kexact.kshell[ks]);
////Add these vectors to the internal list
//int numk=0;
//mRealType modk2;
//for(int ki=0; ki<kexact.numk; ki++) {
// modk2 = dot(kexact.kpts_cart[ki],kexact.kpts_cart[ki]);
// if(modk2 > (kc*kc)) { //Breakup needs kc < k < kcont.
// AddKToList(std::sqrt(modk2));
// numk++;
// }
//}
//Add high |k| ( >kcont, <kmax) k-points with approximate degeneracy
//Volume of 1 K-point is (2pi)^3/(a1.a2^a3)
#if OHMMS_DIM == 3
mRealType kelemvol = 8 * M_PI * M_PI * M_PI / Basis.get_CellVolume();
//Generate 4000 shells:
const int N = 4000;
mRealType deltak = (kmax - kcont) / N;
for (int i = 0; i < N; i++)
{
mRealType k1 = kcont + deltak * i;
mRealType k2 = k1 + deltak;
mRealType kmid = 0.5 * (k1 + k2);
mRealType shellvol = 4.0 * M_PI * (k2 * k2 * k2 - k1 * k1 * k1) / 3.0;
mRealType degeneracy = shellvol / kelemvol;
AddKToList(kmid, degeneracy);
numk += static_cast<int>(degeneracy);
}
#elif OHMMS_DIM == 2
mRealType kelemvol = 4 * M_PI * M_PI / Basis.get_CellVolume();
//Generate 8000 shells:
const int N = 8000;
mRealType deltak = (kmax - kcont) / N;
for (int i = 0; i < N; i++)
{
mRealType k1 = kcont + deltak * i;
mRealType k2 = k1 + deltak;
mRealType kmid = 0.5 * (k1 + k2);
mRealType shellvol = M_PI * (k2 * k2 - k1 * k1);
mRealType degeneracy = shellvol / kelemvol;
AddKToList(kmid, degeneracy);
numk += static_cast<int>(degeneracy);
}
#endif
app_log() << " NUMBER OF OPT_BREAK KVECS = " << numk << std::endl;
return maxkshell;
//numk now contains the total number of vectors.
//this->klist.size() contains the number of unique vectors.
}
//Do the constrained breakup
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoBreakup(mRealType* Fk,
mRealType* t,
mRealType* adjust)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
cnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
      // Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t(col)*A(:,col) from bc
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoGradBreakup(mRealType* Fk,
mRealType* t,
mRealType* adjust)
{
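  // Identical to DoBreakup() except that every k-point carries an extra k^2
  // weight (see the k2 factors in A, b, and the final chi-squared), so the
  // fit targets the gradient (force) rather than the potential itself.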
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
cnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t[col]*A(:,col) from b (copied into bc below)
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoStrainBreakup(mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* adjust)
{
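  // Same constrained SVD machinery as DoBreakup(), but the basis is the
  // k-derivative dc/dk (Basis.dc_dk) and the target is dFk, again with k^2
  // weights, as required for the strain contribution.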
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> dcnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
dcnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
// b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki);
b[l] += k2 * KList[ki][1] * (dFk[ki]) * dcnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t[col]*A(:,col) from b (copied into bc below)
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = dFk[ki]; //-Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= dcnk(n, ki) * t[n];
}
chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
void LRBreakup<BreakupBasis>::DoAllBreakup(mRealType* chisqrlist,
mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* gt,
mRealType* dt,
mRealType* adjust)
{
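  // Performs the potential, force, and strain fits of the three routines above
  // in a single pass, sharing the basis evaluations cnk and dcnk. The solutions
  // are returned in t, gt, and dt, and the chi-squared values in chisqrlist[0..2].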
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
Matrix<mRealType> Af;
Matrix<mRealType> As;
std::vector<mRealType> b;
std::vector<mRealType> bf;
std::vector<mRealType> bs;
Matrix<mRealType> cnk;
Matrix<mRealType> dcnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
Af.resize(N, N);
As.resize(N, N);
b.resize(N, 0.0);
bf.resize(N, 0.0);
bs.resize(N, 0.0);
cnk.resize(N, KList.size());
dcnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k);
}
}
//Fill in A and b
A = 0.0;
Af = 0.0;
As = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
mRealType temp = KList[ki][1] * Fk[ki] * cnk(l, ki);
// b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki);
b[l] += temp;
bf[l] += k2 * temp;
bs[l] += k2 * KList[ki][1] * dFk[ki] * dcnk(l, ki);
for (int n = 0; n < N; n++)
{
temp = KList[ki][1] * cnk(l, ki) * cnk(n, ki);
A(l, n) += temp;
Af(l, n) += k2 * temp;
As(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki);
}
}
}
//************************************
//FOR POTENTIAL AND FORCE
//************************************
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Matrix<mRealType> Afc;
Matrix<mRealType> Asc;
Ac.resize(M, M);
Afc.resize(M, M);
Asc.resize(M, M);
std::vector<mRealType> bc(M, 0.0), bfc(M, 0.0), bsc(M, 0.0), tc(M, 0.0), tfc(M, 0.0), tsc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
Afc(i, j) = Af(row, col);
Asc(i, j) = As(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract the fixed coefficients' contributions from b, bf, and bs
// (copied into bc, bfc, bsc below)
for (int row = 0; row < N; row++)
{
b[row] -= A(row, col) * t[col];
bf[row] -= Af(row, col) * gt[col];
bs[row] -= As(row, col) * dt[col];
}
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
bfc[j] = bf[row];
bsc[j] = bs[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> A_trans(n, m);
Matrix<mRealType> Af_trans(n, m);
Matrix<mRealType> As_trans(n, m);
Matrix<mRealType> U, V;
Matrix<mRealType> Uf, Vf;
Matrix<mRealType> Us, Vs;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
Uf.resize(std::min(m, n), m);
Vf.resize(n, std::min(m, n));
Us.resize(std::min(m, n), m);
Vs.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
std::vector<mRealType> Sf, Sfinv;
Sf.resize(std::min(n, m));
std::vector<mRealType> Ss, Ssinv;
Ss.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
A_trans(j, i) = Ac(i, j);
Af_trans(j, i) = Afc(i, j);
As_trans(j, i) = Asc(i, j);
}
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, A_trans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Af_trans.data(), &LDA, &Sf[0], Uf.data(), &LDU, Vf.data(), &LDVT, &WORK[0],
&LWORK, &INFO);
assert(INFO == 0);
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, As_trans.data(), &LDA, &Ss[0], Us.data(), &LDU, Vs.data(), &LDVT, &WORK[0],
&LWORK, &INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> U_trans(uc, ur);
Matrix<mRealType> Uf_trans(uc, ur);
Matrix<mRealType> Us_trans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
{
U_trans(j, i) = U(i, j);
Uf_trans(j, i) = Uf(i, j);
Us_trans(j, i) = Us(i, j);
}
}
U.resize(uc, ur);
U = U_trans;
Uf.resize(uc, ur);
Uf = Uf_trans;
Us.resize(uc, ur);
Us = Us_trans;
//////////////////////////////////
// Zero out near-singular values
//First, do normal breakup.
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in energy breakup.\n";
//Second, do force.
Smax = Sf[0];
for (int i = 1; i < M; i++)
Smax = std::max(Sf[i], Smax);
for (int i = 0; i < M; i++)
if (Sf[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sfinv.resize(Sf.size());
for (int i = 0; i < M; i++)
Sfinv[i] = (Sf[i] < (tolerance * Smax)) ? 0.0 : (1.0 / Sf[i]);
numSingular = 0;
for (int i = 0; i < Sfinv.size(); i++)
if (Sfinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in force breakup.\n";
//Third, do strain.
Smax = Ss[0];
for (int i = 1; i < M; i++)
Smax = std::max(Ss[i], Smax);
for (int i = 0; i < M; i++)
if (Ss[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Ssinv.resize(Ss.size());
for (int i = 0; i < M; i++)
Ssinv[i] = (Ss[i] < (tolerance * Smax)) ? 0.0 : (1.0 / Ss[i]);
numSingular = 0;
for (int i = 0; i < Ssinv.size(); i++)
if (Ssinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in strain breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
mRealType coef_f = 0.0;
mRealType coef_s = 0.0;
for (int j = 0; j < M; j++)
{
coef += U(j, i) * bc[j];
coef_f += Uf(j, i) * bfc[j];
coef_s += Us(j, i) * bsc[j];
}
coef *= Sinv[i];
coef_f *= Sfinv[i];
coef_s *= Ssinv[i];
for (int k = 0; k < M; k++)
{
tc[k] += coef * V(k, i);
tfc[k] += coef_f * Vf(k, i);
tsc[k] += coef_s * Vs(k, i);
}
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
gt[i] = tfc[j];
dt[i] = tsc[j];
j++;
}
// Calculate chi-squared
mRealType Yk(0.0), chi2(0.0);
mRealType Yk_f(0.0), chi2_f(0.0);
mRealType Yk_s(0.0), chi2_s(0.0);
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki]; //-Fk[ki];
Yk_f = Fk[ki];
Yk_s = dFk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
Yk_f -= cnk(n, ki) * gt[n];
Yk_s -= dcnk(n, ki) * dt[n];
}
chi2 += KList[ki][1] * Yk * Yk;
chi2_f += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_f * Yk_f;
chi2_s += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_s * Yk_s;
}
// std::vector<mRealType> chisqrtmp(3);
chisqrlist[0] = chi2;
chisqrlist[1] = chi2_f;
chisqrlist[2] = chi2_s;
//chisqrlist=chisqrtmp;
}
} // namespace qmcplusplus
#endif
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline double MagickMin(const double x,const double y)
{
return( x < y ? x : y);
}
static inline double MagickMax(const double x,const double y)
{
return( x > y ? x : y);
}
static inline void AffineArgsToCoefficients(double *affine)
{
/* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
/* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
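/* Sanity check for the inversion above: for a pure translation
   (sx=sy=1, rx=ry=0) the determinant is 1 and the inverse reduces to
   tx' = -tx, ty' = -ty, as expected. */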
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2-dimensional bilinear polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
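/* Example: a cubic (order 3) yields floor((3+1)*(3+2)/2) = 10 terms,
   matching the c0..c9 listing above. */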
static size_t poly_number_terms(double order)
{
/* Return the number of terms for a 2d polynomial */
if ( order < 1 || order > 5 ||
( order != floor(order) && (order-1.5) > MagickEpsilon) )
return 0; /* invalid polynomial order */
return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
/* Return the result for this polynomial term */
switch(n) {
case 0: return( 1.0 ); /* constant */
case 1: return( x );
case 2: return( y ); /* affine order = 1 terms = 3 */
case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x*x );
case 5: return( y*y ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x*x );
case 7: return( x*x*y );
case 8: return( x*y*y );
case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x*x );
case 11: return( x*x*x*y );
case 12: return( x*x*y*y );
case 13: return( x*y*y*y );
case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x*x );
case 16: return( x*x*x*x*y );
case 17: return( x*x*x*y*y );
case 18: return( x*x*y*y*y );
case 19: return( x*y*y*y*y );
case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */
}
return( 0 ); /* should never happen */
}
static const char *poly_basis_str(ssize_t n)
{
/* return the result for this polynomial term */
switch(n) {
case 0: return(""); /* constant */
case 1: return("*ii");
case 2: return("*jj"); /* affine order = 1 terms = 3 */
case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */
case 4: return("*ii*ii");
case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */
case 6: return("*ii*ii*ii");
case 7: return("*ii*ii*jj");
case 8: return("*ii*jj*jj");
case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */
case 10: return("*ii*ii*ii*ii");
case 11: return("*ii*ii*ii*jj");
case 12: return("*ii*ii*jj*jj");
case 13: return("*ii*jj*jj*jj");
case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */
case 15: return("*ii*ii*ii*ii*ii");
case 16: return("*ii*ii*ii*ii*jj");
case 17: return("*ii*ii*ii*jj*jj");
case 18: return("*ii*ii*jj*jj*jj");
case 19: return("*ii*jj*jj*jj*jj");
case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
}
return( "UNKNOWN" ); /* should never happen */
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
/* polynomial term for x derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 1.0 );
case 2: return( 0.0 ); /* affine order = 1 terms = 3 */
case 3: return( y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x );
case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x );
case 7: return( x*y );
case 8: return( y*y );
case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x );
case 11: return( x*x*y );
case 12: return( x*y*y );
case 13: return( y*y*y );
case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x );
case 16: return( x*x*x*y );
case 17: return( x*x*y*y );
case 18: return( x*y*y*y );
case 19: return( y*y*y*y );
case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */
}
return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
/* polynomial term for y derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 0.0 );
case 2: return( 1.0 ); /* affine order = 1 terms = 3 */
case 3: return( x ); /* bilinear order = 1.5 terms = 4 */
case 4: return( 0.0 );
case 5: return( y ); /* quadratic order = 2 terms = 6 */
default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */
}
/* NOTE: the only reason that last is not true for 'quadratic'
is due to the re-arrangement of terms to allow for 'bilinear'
*/
}
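/* Example: for n=3 (the bilinear term x*y) poly_basis_dy() returns x, the
   true d(x*y)/dy. Note these basis "derivatives" drop constant multipliers
   (poly_basis_dx(4,..) returns x, not 2x, for x*x); with that convention,
   d/dy of term n has the same monomial pattern as d/dx of term n-1, which
   is why the shifted lookup above works. */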
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
double
distort[6];
Image
*deskew_image;
/*
Affine transform image.
*/
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(affine_matrix != (AffineMatrix *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
distort[0]=affine_matrix->sx;
distort[1]=affine_matrix->rx;
distort[2]=affine_matrix->ry;
distort[3]=affine_matrix->sy;
distort[4]=affine_matrix->tx;
distort[5]=affine_matrix->ty;
deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
MagickTrue,exception);
return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% In future, a variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more method-specific release function.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
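  /* (halfway cases, where the two distances are equal, round up) */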
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number of floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
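  /* Example layouts: for DistortImage() each control point set is u,v,x,y
     (cp_values=0, cp_x=2, cp_y=3, cp_size=4); for a 3-value sparse color
     gradient each set is x,y,r,g,b (cp_x=0, cp_y=1, cp_values=2, cp_size=5). */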
cp_size = number_values+2; /* each CP definition involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficients depends on the given polynomial 'order' */
if ( number_arguments <= 1 || (number_arguments-1)%cp_size != 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid number of args: order [CPs]...");
return((double *) NULL);
}
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadrilateralDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usually 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordinates too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
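        /* Example: given only p0=(0,0) and p1=(1,0), the fabricated third
           point is (0,1) (p1 rotated 90 degrees about p0), which pins the
           solution to a similarity transform instead of leaving the affine
           under-determined. */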
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in their inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2-dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficients */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2-dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existence! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
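      /* DistortImage() reverse-maps pixels, so the affine generated below
         takes destination coordinates back to the source: u = c0*x + c1*y + c2
         and v = c3*x + c4*y + c5. Sanity check with a=0, sx=sy=1: c0=c4=1,
         c1=c3=0, c2=x-nx, c5=y-ny, i.e. the image content is translated by
         (nx-x, ny-y). */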
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaneously
+ Will only work with a 2-dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
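      /* Each control point yields two linear equations, obtained by
         multiplying u = (c0*x + c1*y + c2)/(c6*x + c7*y + 1) through by its
         denominator:
            c0*x + c1*y + c2 - c6*x*u - c7*y*u = u
         (and similarly for v), which is where the -x*u and -y*u terms below
         come from. */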
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* -u*x, the c6 denominator term */
terms[7]=-terms[1]*arguments[i+cp_u]; /* -u*y, the c7 denominator term */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* -v*x, the c6 denominator term */
terms[7]=-terms[4]*arguments[i+cp_v]; /* -v*y, the c7 denominator term */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficients (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinant = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficients but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algebra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y ) / ( c0 + c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: technically coefficient c5 is not needed anymore,
but is kept for completeness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
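        /* Sanity check: for an identity mapping (c0=c5=1, all others 0)
           c8=1 and c9=0, so the reverse map reduces to y = -c/b = j and
           x = ( i - c1*y ) / ( c0 + c2*y ) = i. */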
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficients are used to hold global polynomial information
c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reverse-mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x nterms vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bottom edge to this radius (radial scaling)
By default, if the radii arguments are not provided, the image radius
is calculated so the horizontal center-line fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so the asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
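    /* (i.e. wrap the center angle into the range [-pi,pi]) */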
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 are the sanitized version of the first 6 input args
Coefficient 6 is the angle to coord ratio and vice versa
Coefficient 7 is the radius to coord ratio and vice versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usually 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, it's a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a cylinder and a flat plane from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
         Coefficients 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
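    /* Worked example, assuming a 1000 pixel wide image and a 90 degree FOV:
         Cylinder2Plane: coeff[1] = 1000/(pi/2)        ~= 636.6 (arc length)
         Plane2Cylinder: coeff[1] = 1000/(2*tan(pi/4))  = 500.0 (tangent plane)
       The radii differ because a cylinder holds the image width along its
       arc, while a plane holds it along the tangent line.
    */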
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
         Returns 10 coefficient values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
      /* X,Y Center of Distortion (image coordinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
        /* default: center of the actual image (image coordinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
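    /* Worked example, assuming hypothetical arguments "0.0 0.0 -0.075 1.1"
       (A,B,C,D) on a 640x480 image: rscale = 2.0/480 ~= 0.004167, so the
       returned coefficients are Cx = -0.075*rscale and Dx = 1.1 (D is never
       scaled), with the X values duplicated into Ay..Dy since only one set
       was given.
    */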
case ShepardsDistortion:
{
      /* Shepards Distortion input arguments are the coefficients!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
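    /* Note coeff[0] is half the user's power because the weight is later
       applied to a squared distance.  For example, assuming the artifact
       is set:
         -define shepards:power=2  ->  coeff[0]=1.0 (inverse squared, default)
         -define shepards:power=1  ->  coeff[0]=0.5 (plain inverse distance)
    */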
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DistortResizeImage() resizes an image using the equivalent but slower image
%  distortion operator. The filter is applied using an EWA cylindrical
%  resampling. But like resize, the final image size is limited to whole pixels
%  with no effects by virtual-pixels on the result.
%
%  Note that images containing a transparency channel will be twice as slow to
%  resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
%      Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
Image
*resize_image,
*tmp_image;
RectangleInfo
crop_area;
double
distort_args[12];
VirtualPixelMethod
vp_save;
/*
Distort resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
/* Do not short-circuit this resize if final image size is unchanged */
(void) ResetMagickMemory(distort_args,0,12*sizeof(double));
distort_args[4]=(double) image->columns;
distort_args[6]=(double) columns;
distort_args[9]=(double) image->rows;
distort_args[11]=(double) rows;
vp_save=GetImageVirtualPixelMethod(image);
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if ( tmp_image == (Image *) NULL )
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
if (image->matte == MagickFalse)
{
/*
        Image has no transparency channel, so we are free to use it
*/
(void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if ( resize_image == (Image *) NULL )
return((Image *) NULL);
(void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
InheritException(exception,&image->exception);
}
else
{
/*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
*/
Image
*resize_alpha;
/* distort alpha channel separately */
(void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
(void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if ( resize_alpha == (Image *) NULL )
return((Image *) NULL);
/* distort the actual image containing alpha + VP alpha */
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if ( tmp_image == (Image *) NULL )
return((Image *) NULL);
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if ( resize_image == (Image *) NULL)
{
resize_alpha=DestroyImage(resize_alpha);
return((Image *) NULL);
}
      /* replace the resized image's alpha with the separately distorted alpha */
(void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
(void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
(void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
0,0);
InheritException(exception,&resize_image->exception);
resize_alpha=DestroyImage(resize_alpha);
}
(void) SetImageVirtualPixelMethod(resize_image,vp_save);
/*
Clean up the results of the Distortion
*/
crop_area.width=columns;
crop_area.height=rows;
crop_area.x=0;
crop_area.y=0;
tmp_image=resize_image;
resize_image=CropImage(tmp_image,&crop_area,exception);
tmp_image=DestroyImage(tmp_image);
return(resize_image);
}
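/*
  A minimal usage sketch for DistortResizeImage(), with hypothetical sizes:

    Image *thumb=DistortResizeImage(image,120,90,exception);
    if (thumb != (Image *) NULL)
      thumb=DestroyImage(thumb);

  Unlike ResizeImage() this always resamples through the EWA distortion
  machinery, which is slower but uses cylindrical (elliptical) filtering.
*/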
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
%  If the '-verbose' control option has been set, print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
%      distortion when more than the minimum number of control point pairs
% are provided.
%
%      Perspective, and Bilinear, fall back to an Affine distortion when less
%      than 4 control point pairs are provided. While Affine distortions
%      let you use any number of control point pairs, that is, zero pairs is
%      a no-op (viewport only) distortion, one pair is a translation, and
%      two pairs of control points do a scale-rotate-translate, without any
%      shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
    Usually the four corners of the source image are enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
    /* Set the output image geometry to the calculated 'bestfit'.
       Yes this tends to 'over do' the final image size, ON PURPOSE!
       Do not do this for DePolar which needs to be exact for virtual tiling.
    */
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
  /* The user may provide a 'viewport' expert option, which overrides
     some parts of the current output image geometry.
     This also overrides the default 'bestfit' setting.
  */
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
          break;
        }
default:
break;
}
}
  /* A user-provided 'scale' expert option scales the output image size
     by the factor given, allowing for super-sampling of the distorted
     image space. All sampling coordinates and scaling vectors must then
     be scaled by the inverse of that factor.
  */
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
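/* For example, "-define distort:scale=2" doubles the canvas and sets
   output_scaling to 0.5, so ScaleFilter() halves every derivative vector:
   the EWA sampling ellipse shrinks to match the finer destination grid.
*/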
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
        validity;  /* how mathematically valid this mapping is */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*restrict indexes;
register ssize_t
i;
register PixelPacket
*restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
       * negative: pixel is invalid; output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
            /* Affine partial derivatives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
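          /* Sketch of the derivation for the scaling vectors above: with
             s.x = p/r the quotient rule gives
               d(s.x)/dx = (r*coeff[0] - p*coeff[6])/(r*r)
             and similarly for the other three partials, which is why
             'scale' is squared before being applied.
          */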
case BilinearReverseDistortion:
{
            /* Reverse mapping is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
            /* Bilinear partial derivatives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
            /* Forward mapping needs the reversed polynomial equations,
             * which unfortunately requires a square root!  */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
             * Currently without horizon anti-aliasing */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
               FUTURE: Scaling factors or Derivatives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
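          /* Illustration: for an order 1.5 polynomial (nterms == 4) the
             basis functions are 1, x, y, x*y, so the loop above reduces to
             the BilinearReverse mapping
               s.x = coeff[2] + coeff[3]*x + coeff[4]*y + coeff[5]*x*y
             with du,dv accumulating its exact partial derivatives.
          */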
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
               The result is a very simple orthogonally aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
          { /* 2D Cartesian to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
          { /* 2D Polar to Cartesian */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
            /* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
            /* derivatives... (see personal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
          { /* 3D Tangential Plane to Cylinder */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
            /* is pixel valid - horizon of an infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
          { /* Lens Barrel Distortion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
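          /* Derivation sketch for the EWA vectors above: with
               fx(r) = A*r*r*r + B*r*r + C*r + D  and  s.x = d.x*fx(r),
             the chain rule (dr/dx = d.x/r) gives
               d(s.x)/dx = fx + d.x*d.x*fx'(r)/r = fx + gx*d.x*d.x
             so gx and gy are simply fx'(r)/r and fy'(r)/r.
          */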
case ShepardsDistortion:
          { /* Shepard's Method, or Inverse Distance Weighting, for
               displacement around the destination image control points.
               The input arguments are the coefficients of the function.
               This is more of a 'displacement' function rather than an
               absolute distortion function.
               Note: We cannot determine derivatives using Shepard's method,
               so only a point sample interpolation can be used.
            */
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
            s.x += d.x;   /* make it a relative displacement */
s.y += d.y;
break;
}
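          /* In formula form the loop above computes
               s = d + sum(w_k*(u_k - x_k))/sum(w_k),
               w_k = 1/max(1,|d - x_k|^(2*coeff[0]))
             an inverse distance weighted average of the control point
             displacements, applied as an offset from the destination.
          */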
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
  /* Arc does not return an offset unless 'bestfit' is in effect
     and the user has not provided an overriding 'viewport'.
     (distort_image may be NULL if the resampling above failed)
  */
  if ( distort_image != (Image *) NULL && method == ArcDistortion
       && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
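/*
  A minimal usage sketch for DistortImage(), with hypothetical values:

    double angle=30.0;
    Image *rotated=DistortImage(image,ScaleRotateTranslateDistortion,1,
      &angle,MagickTrue,exception);

  With 'bestfit' MagickTrue the result is enlarged and offset so the whole
  rotated source remains visible, just as RotateImage() below does.
*/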
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*distort_image,
*rotate_image;
MagickRealType
angle;
PointInfo
shear;
size_t
rotations;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
angle=degrees;
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
return(IntegralRotateImage(image,rotations,exception));
distort_image=CloneImage(image,0,0,MagickTrue,exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod);
rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
distort_image=DestroyImage(distort_image);
return(rotate_image);
}
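/*
  Note that for exact multiples of 90 degrees the shear factors above are
  both below MagickEpsilon, so RotateImage(image,90.0,exception) takes the
  IntegralRotateImage() shortcut and never enters the distortion code.
*/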
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const ChannelType channel,const SparseColorMethod method,
const size_t number_arguments,const double *arguments,
ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ( channel & RedChannel ) number_colors++;
if ( channel & GreenChannel ) number_colors++;
if ( channel & BlueChannel ) number_colors++;
if ( channel & IndexChannel ) number_colors++;
if ( channel & OpacityChannel ) number_colors++;
/*
    Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortImageMethod
distort_method;
distort_method=(DistortImageMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
  /* Generate a new image for the interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy is: if the storage class could change, then
   * clone the image.
   */
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
InheritException(exception,&image->exception);
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel; /* pixel to assign to distorted image */
register IndexPacket
*restrict indexes;
register ssize_t
i;
register PixelPacket
*restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
GetMagickPixelPacket(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
SetMagickPixelPacket(image,q,indexes,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
register ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
if ( channel & RedChannel ) pixel.red = 0.0;
if ( channel & GreenChannel ) pixel.green = 0.0;
if ( channel & BlueChannel ) pixel.blue = 0.0;
if ( channel & IndexChannel ) pixel.index = 0.0;
if ( channel & OpacityChannel ) pixel.opacity = 0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
register ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ( channel & RedChannel )
pixel.red += arguments[x++]*weight;
if ( channel & GreenChannel )
pixel.green += arguments[x++]*weight;
if ( channel & BlueChannel )
pixel.blue += arguments[x++]*weight;
if ( channel & IndexChannel )
pixel.index += arguments[x++]*weight;
if ( channel & OpacityChannel )
pixel.opacity += arguments[x++]*weight;
denominator += weight;
}
if ( channel & RedChannel ) pixel.red /= denominator;
if ( channel & GreenChannel ) pixel.green /= denominator;
if ( channel & BlueChannel ) pixel.blue /= denominator;
if ( channel & IndexChannel ) pixel.index /= denominator;
if ( channel & OpacityChannel ) pixel.opacity /= denominator;
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ( channel & RedChannel ) pixel.red = arguments[x++];
if ( channel & GreenChannel ) pixel.green = arguments[x++];
if ( channel & BlueChannel ) pixel.blue = arguments[x++];
if ( channel & IndexChannel ) pixel.index = arguments[x++];
if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
minimum = distance;
}
}
break;
}
}
/* scale the normalized channel values and store the color in the sparse image */
if ( channel & RedChannel ) pixel.red *= QuantumRange;
if ( channel & GreenChannel ) pixel.green *= QuantumRange;
if ( channel & BlueChannel ) pixel.blue *= QuantumRange;
if ( channel & IndexChannel ) pixel.index *= QuantumRange;
if ( channel & OpacityChannel ) pixel.opacity *= QuantumRange;
SetPixelPacket(sparse_image,&pixel,q,indexes);
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SparseColorImage)
#endif
proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
GB_unaryop__lnot_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_int64
// op(A') function: GB_tran__lnot_uint64_int64
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
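// For a single entry, GB_CAST_OP (p, p) expands (per the macros above) to:
//   int64_t aij = Ax [p] ;
//   uint64_t x = (uint64_t) aij ;
//   Cx [p] = !(x != 0) ;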
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_int64
(
uint64_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
odf_fmt_plug.c | /* ODF cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_odf);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha.h"
#include "sha2.h"
#include <openssl/blowfish.h>
#include "aes.h"
#include "pbkdf2_hmac_sha1.h"
#include "memdbg.h"
#define FORMAT_LABEL "ODF"
#define FORMAT_TAG "$odf$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1/SHA256 " SHA1_ALGORITHM_NAME " BF/AES"
#else
#define ALGORITHM_NAME "SHA1/SHA256 BF/AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d72244757d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"},
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"},
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf730998513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"},
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54abdd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"},
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"},
/* CMIYC 2013 "pro" hard hash */
{"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91bab37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
if (strlen(p) != 1)
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
if (strlen(p) != 1)
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
res = hexlenl(p, &extra);
if (extra)
goto err;
if (res != BINARY_SIZE * 2 && res != 64) // 2 hash types.
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res > 16 || res < 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
if (strlen(p) >= 10)
goto err;
res = atoi(p);
if (res > 32 || res < 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* something */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
res = strlen(p);
if (res > 2048 || res & 1)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
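/*
* Field layout accepted by valid() above (a sketch inferred from the
* parsing order, not an official spec):
*   $odf$*cipher_type*checksum_type*iterations*key_size*checksum(hex)*
*         iv_length*iv(hex)*salt_length*salt(hex)*unknown*content(hex)
*/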
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$odf$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
strtokm(NULL, "*");
p = strtokm(NULL, "*");
memset(cs.content, 0, sizeof(cs.content));
for (i = 0; p[i * 2] && i < 1024; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
cs.content_length = i;
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$odf$*" */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
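/*
* Verification pipeline implemented in crypt_all() below, in outline:
*   checksum/cipher type 0: key = PBKDF2-SHA1(SHA1(password), salt),
*     Blowfish-CFB64 decrypt of content, candidate = SHA1(plaintext);
*   otherwise: key = PBKDF2-SHA1(SHA256(password), salt),
*     AES-256-CBC decrypt of content, candidate = SHA256(plaintext).
*/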
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char key[MAX_KEYS_PER_CRYPT][32];
unsigned char hash[MAX_KEYS_PER_CRYPT][32];
BF_KEY bf_key;
int bf_ivec_pos, i;
unsigned char ivec[8];
unsigned char output[1024];
SHA_CTX ctx;
#ifdef SIMD_COEF_32
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
#endif
if(cur_salt->checksum_type == 0 && cur_salt->cipher_type == 0) {
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
SHA1_Final((unsigned char *)(hash[i]), &ctx);
}
#ifdef SIMD_COEF_32
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = 20;
pin[i] = hash[i];
pout[i] = key[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, pout,
cur_salt->key_size, 0);
#else
pbkdf2_sha1(hash[0], 20, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, key[0],
cur_salt->key_size, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
bf_ivec_pos = 0;
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, key[i]);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
SHA1_Update(&ctx, output, cur_salt->content_length);
SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
}
}
else {
SHA256_CTX ctx;
AES_KEY akey;
unsigned char iv[16];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
SHA256_Final((unsigned char *)hash[i], &ctx);
}
#ifdef SIMD_COEF_32
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = 32;
pin[i] = hash[i];
pout[i] = key[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, pout,
cur_salt->key_size, 0);
#else
pbkdf2_sha1(hash[0], 32, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, key[0],
cur_salt->key_size, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
memcpy(iv, cur_salt->iv, 16);
memset(&akey, 0, sizeof(AES_KEY));
if(AES_set_decrypt_key(key[i], 256, &akey) < 0) {
fprintf(stderr, "AES_set_decrypt_key failed!\n");
}
AES_cbc_encrypt(cur_salt->content, output, cur_salt->content_length, &akey, iv, AES_DECRYPT);
SHA256_Init(&ctx);
SHA256_Update(&ctx, output, cur_salt->content_length);
SHA256_Final((unsigned char*)crypt_out[index+i], &ctx);
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void odf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
/*
* The format tests all have iteration count 1024.
* Just in case the iteration count is tunable, let's report it.
*/
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
struct fmt_main fmt_odf = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
odf_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
fmt_default_salt_hash,
NULL,
set_salt,
odf_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
ast-dump-openmp-target.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp target
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:9, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:9) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:9) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:9) *const restrict'
|
ast-dump-openmp-begin-declare-variant_10.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// expected-no-diagnostics
#ifdef __cplusplus
#define CONST constexpr
#else
#define CONST __attribute__((const))
#endif
int also_before1(void) {
return 1;
}
int also_before2(void) {
return 2;
}
int also_before3(void) {
return 3;
}
int also_before4(void) {
return 4;
}
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
CONST int also_before1(void) {
return 0;
}
static int also_before2(void) {
return 0;
}
__attribute__((nothrow)) int also_before3(void) {
return 0;
}
static CONST __attribute__((nothrow, always_inline)) __inline__ int also_before4(void) {
return 0;
}
#pragma omp end declare variant
int main(void) {
// Should return 0.
return also_before1() + also_before2() + also_before3() + also_before4();
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:1> line:11:5 used also_before1 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:24, line:13:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:12:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:8:15> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:14:1, line:16:1> line:14:5 used also_before2 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:24, line:16:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:15:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <line:28:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:17:1, line:19:1> line:17:5 used also_before3 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:24, line:19:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <line:18:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int' 3
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <line:31:1> 'int ({{.*}})' Function [[ADDR_20:0x[a-z0-9]*]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:20:1, line:22:1> line:20:5 used also_before4 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:24, line:22:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:21:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 4
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}})' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_6]] <line:8:15, line:27:1> line:8:15 also_before1[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_28:0x[a-z0-9]*]] <line:25:30, line:27:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_29:0x[a-z0-9]*]] <line:26:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_30:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-ConstAttr [[ADDR_31:0x[a-z0-9]*]] <line:8:30>
// C-NEXT: |-FunctionDecl [[ADDR_13]] <line:28:1, line:30:1> line:28:1 also_before2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// C-NEXT: | `-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:31, line:30:1>
// C-NEXT: | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:29:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: |-FunctionDecl [[ADDR_20]] <line:31:1, line:33:1> line:31:1 also_before3[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_35:0x[a-z0-9]*]] <col:49, line:33:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_36:0x[a-z0-9]*]] <line:32:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-NoThrowAttr [[ADDR_38:0x[a-z0-9]*]] <line:31:16>
// C-NEXT: |-FunctionDecl [[ADDR_27]] <line:34:1, line:36:1> line:34:1 also_before4[implementation={vendor(llvm)}] 'int ({{.*}})' static inline
// C-NEXT: | |-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:88, line:36:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:35:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | |-ConstAttr [[ADDR_42:0x[a-z0-9]*]] <line:8:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_43:0x[a-z0-9]*]] <line:34:29>
// C-NEXT: | `-AlwaysInlineAttr [[ADDR_44:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: `-FunctionDecl [[ADDR_45:0x[a-z0-9]*]] <line:40:1, line:43:1> line:40:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:16, line:43:1>
// C-NEXT: `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:42:3, col:74>
// C-NEXT: `-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:10, col:74> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_49:0x[a-z0-9]*]] <col:10, col:57> 'int' '+'
// C-NEXT: | |-BinaryOperator [[ADDR_50:0x[a-z0-9]*]] <col:10, col:40> 'int' '+'
// C-NEXT: | | |-PseudoObjectExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int'
// C-NEXT: | | | |-CallExpr [[ADDR_52:0x[a-z0-9]*]] <col:10, col:23> 'int'
// C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' Function [[ADDR_0]] 'also_before1' 'int ({{.*}})'
// C-NEXT: | | | `-CallExpr [[ADDR_55:0x[a-z0-9]*]] <line:8:15, line:42:23> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_56:0x[a-z0-9]*]] <line:8:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_5]] <col:15> 'int ({{.*}})' Function [[ADDR_6]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | | `-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <line:42:27, col:40> 'int'
// C-NEXT: | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:27, col:40> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:27> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:27> 'int ({{.*}})' Function [[ADDR_7]] 'also_before2' 'int ({{.*}})'
// C-NEXT: | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:28:1, line:42:40> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:28:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:42:44, col:57> 'int'
// C-NEXT: | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:44, col:57> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:44> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:44> 'int ({{.*}})' Function [[ADDR_14]] 'also_before3' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_67:0x[a-z0-9]*]] <line:31:1, line:42:57> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <line:31:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <line:42:61, col:74> 'int'
// C-NEXT: |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:61, col:74> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:61> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:61> 'int ({{.*}})' Function [[ADDR_21]] 'also_before4' 'int ({{.*}})'
// C-NEXT: `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:34:1, line:42:74> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_26]] <col:1> 'int ({{.*}})' Function [[ADDR_27]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:1> line:11:5 used also_before1 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:24, line:13:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:12:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:6:15> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:14:1, line:16:1> line:14:5 used also_before2 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:24, line:16:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:15:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <line:28:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:17:1, line:19:1> line:17:5 used also_before3 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:24, line:19:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <line:18:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int' 3
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <line:31:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_20:0x[a-z0-9]*]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:20:1, line:22:1> line:20:5 used also_before4 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:24, line:22:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:21:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 4
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:6:15, line:27:1> line:6:15 constexpr also_before1[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_28:0x[a-z0-9]*]] <line:25:30, line:27:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_29:0x[a-z0-9]*]] <line:26:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_30:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_13]] <line:28:1, line:30:1> line:28:1 also_before2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// CXX-NEXT: | `-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:31, line:30:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:29:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_33:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_20]] <line:31:1, line:33:1> line:31:1 also_before3[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:49, line:33:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:32:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_27]] <line:34:1, line:36:1> line:34:1 constexpr also_before4[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-CompoundStmt [[ADDR_37:0x[a-z0-9]*]] <col:88, line:36:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_38:0x[a-z0-9]*]] <line:35:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_39:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-AlwaysInlineAttr [[ADDR_40:0x[a-z0-9]*]] <line:34:38> always_inline
// CXX-NEXT: `-FunctionDecl [[ADDR_41:0x[a-z0-9]*]] <line:40:1, line:43:1> line:40:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_42:0x[a-z0-9]*]] <col:16, line:43:1>
// CXX-NEXT: `-ReturnStmt [[ADDR_43:0x[a-z0-9]*]] <line:42:3, col:74>
// CXX-NEXT: `-BinaryOperator [[ADDR_44:0x[a-z0-9]*]] <col:10, col:74> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:10, col:57> 'int' '+'
// CXX-NEXT: | |-BinaryOperator [[ADDR_46:0x[a-z0-9]*]] <col:10, col:40> 'int' '+'
// CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_47:0x[a-z0-9]*]] <col:10, col:23> 'int'
// CXX-NEXT: | | | |-CallExpr [[ADDR_48:0x[a-z0-9]*]] <col:10, col:23> 'int'
// CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_49:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_50:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before1' 'int ({{.*}})'
// CXX-NEXT: | | | `-CallExpr [[ADDR_51:0x[a-z0-9]*]] <line:6:15, line:42:23> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_52:0x[a-z0-9]*]] <line:6:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_5]] <col:15> 'int ({{.*}})' Function [[ADDR_6]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_53:0x[a-z0-9]*]] <line:42:27, col:40> 'int'
// CXX-NEXT: | | |-CallExpr [[ADDR_54:0x[a-z0-9]*]] <col:27, col:40> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <col:27> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:27> 'int ({{.*}})' {{.*}}Function [[ADDR_7]] 'also_before2' 'int ({{.*}})'
// CXX-NEXT: | | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:28:1, line:42:40> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <line:28:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | `-PseudoObjectExpr [[ADDR_59:0x[a-z0-9]*]] <line:42:44, col:57> 'int'
// CXX-NEXT: | |-CallExpr [[ADDR_60:0x[a-z0-9]*]] <col:44, col:57> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:44> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:44> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'also_before3' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_63:0x[a-z0-9]*]] <line:31:1, line:42:57> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_20]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-PseudoObjectExpr [[ADDR_65:0x[a-z0-9]*]] <line:42:61, col:74> 'int'
// CXX-NEXT: |-CallExpr [[ADDR_66:0x[a-z0-9]*]] <col:61, col:74> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_67:0x[a-z0-9]*]] <col:61> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_68:0x[a-z0-9]*]] <col:61> 'int ({{.*}})' {{.*}}Function [[ADDR_21]] 'also_before4' 'int ({{.*}})'
// CXX-NEXT: `-CallExpr [[ADDR_69:0x[a-z0-9]*]] <line:34:1, line:42:74> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_70:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_26]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_27]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
|
GxB_IndexUnaryOp_ztype_name.c | //------------------------------------------------------------------------------
// GxB_IndexUnaryOp_ztype_name: return the type_name of z for z=f(x,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_IndexUnaryOp_ztype_name // return the name of the type of z
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_IndexUnaryOp op
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_IndexUnaryOp_ztype_name (type_name, op)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
ASSERT_INDEXUNARYOP_OK (op, "op for ztype_name", GB0) ;
//--------------------------------------------------------------------------
// get the type_name
//--------------------------------------------------------------------------
memcpy (type_name, op->ztype->name, GxB_MAX_NAME_LEN) ;
#pragma omp flush
return (GrB_SUCCESS) ;
}
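// Usage sketch (hypothetical caller; GrB_TRIL's z type is bool):
//   char name [GxB_MAX_NAME_LEN] ;
//   GxB_IndexUnaryOp_ztype_name (name, GrB_TRIL) ;  // name is now "bool"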
|
test.c | #include <stdio.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#define N 100
#pragma omp declare target
#pragma omp declare simd
int foo(int k) {
return k+1;
}
#pragma omp declare simd simdlen(16)
int foo_simdlen(int k) {
return k+1;
}
#pragma omp declare simd linear(b:1)
int foo_linear(int *b) {
return *b+1;
}
#pragma omp declare simd aligned(b:8)
int foo_aligned(int *b) {
return *b+1;
}
#pragma omp declare simd linear(b:1) uniform(c)
int foo_uniform(int *b, int c) {
return *b+c;
}
#pragma omp declare simd linear(b:1) uniform(c) inbranch
int foo_inbranch(int *b, int c) {
return *b+c;
}
#pragma omp declare simd linear(b:1) uniform(c) notinbranch
int foo_notinbranch(int *b, int c) {
return *b+c;
}
#pragma omp end declare target
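/* Informal clause summary for the variants above: simdlen(16) requests
16-lane vector variants; linear(b:1) advances b by one element per SIMD
lane; uniform(c) keeps c invariant across lanes; aligned(b:8) asserts
8-byte alignment of b; inbranch/notinbranch control whether masked
variants are generated for calls made under a condition. */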
int main()
{
check_offloading();
int a[N], aa[N], b[N];
int i, fail = 0;
/// Test: no clauses
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
int k;
#pragma omp simd
for(k=0; k<N; k++)
a[k] = foo(k);
}
// host
for(i=0; i<N; i++)
aa[i] = i+1;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: simdlen
fail = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
int k;
#pragma omp simd
for(k=0; k<N; k++)
a[k] = foo_simdlen(k);
}
// host
for(i=0; i<N; i++)
aa[i] = i+1;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: linear
fail = 0;
// initialize
for(i=0; i<N; i++) {
aa[i] = a[i] = -1;
b[i] = i;
}
// offload
#pragma omp target map(tofrom: a[0:100]) map(to:b[:100])
{
int k;
#pragma omp simd
for(k=0; k<N; k++) {
a[k] += foo_linear(&b[k]); // -1 += i
}
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: aligned
fail = 0;
// initialize
for(i=0; i<N; i++) {
aa[i] = a[i] = -1;
b[i] = i;
}
// offload
#pragma omp target map(tofrom: a[0:100]) map(to:b[:100])
{
int k;
#pragma omp simd
for(k=0; k<N; k++) {
a[k] += foo_aligned(&b[k]); // -1 += i
}
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: uniform
fail = 0;
// initialize
for(i=0; i<N; i++) {
aa[i] = a[i] = -1;
b[i] = i;
}
// offload
#pragma omp target map(tofrom: a[0:100]) map(to:b[:100])
{
int k;
int c = 3;
#pragma omp simd
for(k=0; k<N; k++) {
a[k] += foo_uniform(&b[k],c); // -1 += i
}
}
// host
for(i=0; i<N; i++)
aa[i] = i + 2;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: inbranch
fail = 0;
// initialize
for(i=0; i<N; i++) {
aa[i] = a[i] = -1;
b[i] = i;
}
// offload
#pragma omp target map(tofrom: a[0:100]) map(to:b[:100])
{
int k;
int c = 3;
#pragma omp simd
for(k=0; k<N; k++) {
if (k%2 == 0)
a[k] += foo_inbranch(&b[k],c); // -1 += i
}
}
// host
for(i=0; i<N; i++)
if (i%2 == 0)
aa[i] = i + 2;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
/// Test: notinbranch
fail = 0;
// initialize
for(i=0; i<N; i++) {
aa[i] = a[i] = -1;
b[i] = i;
}
// offload
#pragma omp target map(tofrom: a[0:100]) map(to:b[:100])
{
int k;
int c = 3;
#pragma omp simd
for(k=0; k<N; k++) {
a[k] += foo_notinbranch(&b[k],c); // -1 += i
}
}
// host
for(i=0; i<N; i++)
aa[i] = i + 2;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i]) {
printf("%d: a %d != %d\n", i, a[i], aa[i]);
fail = 1;
}
}
// report
if (fail)
printf("failed\n");
else
printf("success\n");
return 0;
}
|
CArbitrarySlice.h | ///////////////////////////////////////////////////////////////////////////////
// $Id$
//
// 3DimViewer
// Lightweight 3D DICOM viewer.
//
// Copyright 2008-2016 3Dim Laboratory s.r.o.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __CARBITRARYSLICE_H__
#define __CARBITRARYSLICE_H__
#include <data/CStorageInterface.h>
#include <data/CObjectHolder.h>
#include <data/CSlice.h>
#include <data/storage_ids_core.h>
#include <data/CDensityData.h>
#include <app/Signals.h>
namespace data
{
class CArbitrarySlice : public CSlice
{
public:
VPL_SHAREDPTR(CArbitrarySlice);
protected:
//! Scene voxel size.
osg::Vec3 m_VoxelSize;
//! Type of interpolation (can be nearest or bilinear).
TInterpolationType m_InterpolationType;
//! Current plane position
osg::Vec3 m_center;
osg::Vec3 m_normal;
osg::Vec3 m_origin;
//! Right vector
osg::Vec3 m_right;
//! Size of slice
double m_fWidth, m_fHeight;
int m_position;
int m_positionMin;
int m_positionMax;
osg::Matrix m_rotationMatrix;
vpl::img::CImage16 m_regionXCoords;
vpl::img::CImage16 m_regionYCoords;
vpl::img::CImage16 m_regionZCoords;
bool m_regionCoordsInitialized;
public:
//! Constructor.
CArbitrarySlice();
//! Destructor.
virtual ~CArbitrarySlice();
//! Called upon updating from storage
virtual void update(const CChangedEntries& Changes);
//! Returns true if changes of a given parent entry may affect this object.
bool checkDependency(CStorageEntry* pParent)
{
return true;
}
//! Re-initializes the slice.
virtual void init();
//! Does object contain relevant data?
virtual bool hasData()
{
return false;
}
//! Returns voxel parameters
const osg::Vec3& getSliceVoxelSize() const
{
return m_VoxelSize;
}
void setPosition(double position);
//! Return plane position
int getPosition() const
{
return m_position;
}
int getPositionMax()
{
return m_positionMax;
}
int getPositionMin()
{
return m_positionMin;
}
void setPlaneCenter(const osg::Vec3& newCenter)
{
m_center = newCenter;
recomputePosition();
}
//! Return plane center
const osg::Vec3& getPlaneCenter() const
{
return m_center;
}
//! Return plane normal
const osg::Vec3& getPlaneNormal() const
{
return m_normal;
}
//! Return plane origin
const osg::Vec3& getOrigin() const
{
return m_origin;
}
//! Return plane right vector
const osg::Vec3& getPlaneRight() const
{
return m_right;
}
double getSliceWidth()
{
return m_fWidth;
}
void setSliceWidth(double width)
{
m_fWidth = width;
}
double getSliceHeight()
{
return m_fHeight;
}
void setSliceHeight(double height)
{
m_fHeight = height;
}
//! Returns width (x-size) of the original image.
virtual vpl::tSize getWidth() const;
//! Returns height (y-size) of the original image.
virtual vpl::tSize getHeight() const;
void setRotationMatrix(const osg::Matrix& matrix)
{
m_rotationMatrix = matrix;
recomputePosition();
}
const osg::Matrix& getRotationMatrix()
{
return m_rotationMatrix;
}
bool computeSamplingParameters(osg::Vec3& outPosition, osg::Vec3& outVec1, osg::Vec3& outVec2, const CChangedEntries* Changes = NULL);
template<class VolumeType, class SliceType>
bool updateProperty(VolumeType* volume, SliceType* slice)
{
osg::Vec3 realPosition;
osg::Vec3 vec1;
osg::Vec3 vec2;
if (!computeSamplingParameters(realPosition, vec1, vec2))
{
return false;
}
data::CObjectPtr<data::CDensityData> spDensityVolume(APP_STORAGE.getEntry(VPL_SIGNAL(SigGetActiveDataSet).invoke2()));
// Calculate voxel size
#if(0)
osg::Vec3 v2(spDensityVolume->getDX() * vec2[0], spDensityVolume->getDY() * vec2[1], spDensityVolume->getDZ() * vec2[2]);
m_VoxelSize[0] = v2.length();
osg::Vec3 v1(spDensityVolume->getDX() * vec1[0], spDensityVolume->getDY() * vec1[1], spDensityVolume->getDZ() * vec1[2]);
m_VoxelSize[1] = v1.length();
#else
osg::Vec3f voxelSizeB = osg::Vec3f(1 / spDensityVolume->getDX(), 1 / spDensityVolume->getDY(), 1 / spDensityVolume->getDZ());
osg::Vec3f vvec1 = osg::componentMultiply(voxelSizeB, vec1);
vvec1.normalize();
osg::Vec3f vvec2 = osg::componentMultiply(voxelSizeB, vec2);
vvec2.normalize();
osg::Vec3 v2(spDensityVolume->getDX() * vvec2[0], spDensityVolume->getDY() * vvec2[1], spDensityVolume->getDZ() * vvec2[2]);
m_VoxelSize[0] = v2.length();
osg::Vec3 v1(spDensityVolume->getDX() * vvec1[0], spDensityVolume->getDY() * vvec1[1], spDensityVolume->getDZ() * vvec1[2]);
m_VoxelSize[1] = v1.length();
#endif
if (0 == m_VoxelSize[0] || 0 == m_VoxelSize[1])
{
return false;
}
// hotfix for extra small voxel sizes to avoid excessive memory requirements
m_VoxelSize[0] = std::max(0.01f, m_VoxelSize[0]);
m_VoxelSize[1] = std::max(0.01f, m_VoxelSize[1]);
// convert to volume coordinates
data::CCoordinatesConv CoordConv = VPL_SIGNAL(SigGetActiveConvObject).invoke2();
// Calculate voxel size of slice
//osg::Vec3d voxelSize = osg::Vec3d(spDensityVolume->getDX(), spDensityVolume->getDY(), spDensityVolume->getDZ());
double plengthW = m_fWidth / m_VoxelSize[0];
double plengthH = m_fHeight / m_VoxelSize[1];
// initialize parameter along the slice
//double tW = -(plengthW - 1) * 0.5,
// tH = -(plengthH - 1) * 0.5;
// new size of the slice
vpl::tSize Width = static_cast<vpl::tSize>(plengthW);
vpl::tSize Height = static_cast<vpl::tSize>(plengthH);
double cW = (Width - 1) * 0.5;
double cH = (Height - 1) * 0.5;
slice->resize(Width, Height);
osg::Vec3 positionR;
positionR[0] = CoordConv.fromRealXd(realPosition[0]) + 0.001;
positionR[1] = CoordConv.fromRealYd(realPosition[1]) + 0.001;
positionR[2] = CoordConv.fromRealZd(realPosition[2]) + 0.001;
#pragma omp parallel for
for (vpl::tSize i = 0; i < Width; i++)
{
for (vpl::tSize j = 0; j < Height; j++)
{
// compute point on the slice
osg::Vec3d point = positionR + vvec2 * (i - cW) + vvec1 * (j - cH);
point[2] += m_VoxelSize[2] * 0.5;
if (!(point[0] < 0 || point[0] >= volume->getXSize() ||
point[1] < 0 || point[1] >= volume->getYSize() ||
point[2] < 0 || point[2] >= volume->getZSize()))
{
vpl::img::CPoint3D p(point[0], point[1], point[2]);
vpl::tSize xx, yy, zz;
xx = static_cast<vpl::tSize>(point[0]);
yy = static_cast<vpl::tSize>(point[1]);
zz = static_cast<vpl::tSize>(point[2]);
(*slice)(i, j) = volume->at(xx, yy, zz);
}
else
{
(*slice)(i, j) = 0;
}
}
}
return true;
}
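//! Usage sketch (illustrative only; the raw density-volume pointer is an
//! assumption, not something this header provides directly):
//!   vpl::img::CImage16 sliceImage;
//!   pSlice->updateProperty(pDensityVolume /* data::CDensityData* */, &sliceImage);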
protected:
//! Update slice texture
void updateTextureData(const osg::Vec3& position, const osg::Vec3& vec1, const osg::Vec3& vec2, bool updateDensityImage = true, bool updateRegionImage = true);
void recomputePosition();
};
namespace Storage
{
//! Storage identifier of arbitrary slice
DECLARE_OBJECT(ArbitrarySlice, data::CArbitrarySlice, CORE_STORAGE_SLICE_ARB_ID);
}
} // namespace data
#endif // __CARBITRARYSLICE_H__
|
paradis.h | #include <cassert>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <random>
#include <sstream>
#include <thread>
#include <fstream>
#include <iomanip>
#include <iterator>
#include <climits>
#include <omp.h>
#include <cmath> // for log() used in the thread-count estimation below
#define FOR(i,a,b) for(int i=a;i<b;i++)
#define rep(i,b) FOR(i,0,b)
const int MaxThreadNum=224;
const long long MaxDataSize=10000000000;
const long long MaxDataNum=4294967295;
const int MaxKisuu=256;
std::vector<int> Dataset;
long long Datasize;
static const int kRadixBits = 8;
static const size_t kInsertSortThreshold = 0;
static const int kRadixMask = (1 << kRadixBits) - 1;
static const int kRadixBin = 1 << kRadixBits;
template<class D>
inline int determineDigitBucket(int stage,D num){
return ((num>>(8*stage))&kRadixMask);
}
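// Worked example: determineDigitBucket(1, 0x00ABCDEF) == 0xCD; stage selects
// the stage-th byte of num, counting from the least-significant byte.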
template< class _Type>
inline void _swap(_Type &a, _Type&b) {
_Type temp = b;
b = a;
a = temp;
}
void report_num_threads(int level)
{
#pragma omp single
{
printf("Level %d: number of threads in the team - %d\n",
level, omp_get_num_threads());
}
}
template<class T>
bool compare(const T &x,const T &y){
return x < y;
}
template <class RandomIt>
inline void insert_sort_core_(RandomIt s, RandomIt e)
{
for (RandomIt i = s + 1; i < e; ++i) {
if (compare(*i, *(i - 1))) {
RandomIt j;
auto tmp = *i;
*i = *(i - 1);
for (j = i - 1; j > s && compare(tmp, *(j - 1)); --j) {
*j = *(j - 1);
}
*j = tmp;
}
}
}
template<int kth_byte,class RandomIt>
inline void PARADIS_core(RandomIt s,RandomIt t,RandomIt begin_itr,int processes=1){
long long cnt[MaxKisuu]={0};
long long elenum=distance(s,t);
long long start=distance(begin_itr,s);
//assert(start>=0);assert(elenum>=0);
//step1
//assert(processes>0);
long long part=elenum/processes;
long long res=elenum%processes;
long long localHists[MaxThreadNum][MaxKisuu];
long long gh[MaxKisuu],gt[MaxKisuu],starts[MaxKisuu],ends[MaxKisuu];
long long ph[MaxThreadNum][MaxKisuu];
long long pt[MaxThreadNum][MaxKisuu];
long long SumCi=elenum;
long long pfp[processes+1];
int var_p=processes;
#pragma omp parallel num_threads(processes)
{
int th=omp_get_thread_num();
#pragma omp for
rep(i,kRadixBin){
rep(t,processes)localHists[t][i]=0;
}
#pragma omp barrier
#pragma omp for
for(int i=start;i<start+elenum;i++){
int digit=determineDigitBucket(kth_byte,*(begin_itr+i));
localHists[th][digit]++;
}
#pragma omp barrier
#pragma omp for
for(int i=0;i<kRadixBin;i++){
for(int j=0;j<processes;j++){
cnt[i]+=localHists[j][i];
}
}
#pragma omp barrier
#pragma omp single
{
gh[0]=start;
gt[0]=gh[0]+cnt[0];
starts[0]=gh[0];
}
//step2
#pragma omp single
for(int i=1;i<kRadixBin;i++){
//calc ghi
gh[i]=gh[i-1]+cnt[i-1];
//calc gti
gt[i]=gh[i]+cnt[i];
starts[i]=gh[i];
}
#pragma omp barrier
//step3
while(SumCi!=0){
#pragma omp for
for(int ii=0;ii<processes;ii++){
int pID=omp_get_thread_num();
for(int i=0;i<kRadixBin;i++){
long long part=(long long)(gt[i]-gh[i])/(long long)var_p;
long long res=(long long)(gt[i]-gh[i])%(long long)(var_p);
if(pID<var_p-1){
ph[pID][i]=part*pID+gh[i];
pt[pID][i]=part*(pID+1LL)+gh[i];
}else{
ph[pID][i]=part*pID+gh[i];
pt[pID][i]=part*(pID+1LL)+gh[i]+res;
}
}
for(int i=0;i<kRadixBin;i++){
long long head=ph[pID][i];
while(head<pt[pID][i]){
auto v=*(begin_itr+head);
int k=determineDigitBucket(kth_byte,v);
while(k!=i&&ph[pID][k]<pt[pID][k]){
_swap(v,*(begin_itr+(int)ph[pID][k]));ph[pID][k]++;
k=determineDigitBucket(kth_byte,v);
}
if(k==i){
*(begin_itr+head)=*(begin_itr+ph[pID][i]);head++;
*(begin_itr+ph[pID][i])=v;ph[pID][i]++;
}else{
*(begin_itr+head)=v;head++;
}
}
}
}//end of omp permute
#pragma omp single
{
SumCi=0;
long long pfpN=kRadixBin/var_p;
long long pfpM=kRadixBin%var_p;
pfp[0]=0LL;
long long pfpMR=0LL;
for(long long i=1LL;i<var_p+1LL;i++){
if(pfpMR<pfpM)pfpMR++;
pfp[i]=i*pfpN+pfpMR;
}
}
#pragma omp barrier
#pragma omp for
for(int k=0;k<processes;k++){
for(long long i=pfp[k];i<pfp[k+1];i++){
long long tail=gt[i];
{
for(int pID=0;pID<processes;pID++){
long long head=ph[pID][i];
while(head<pt[pID][i]&&head<tail){
int v=*(begin_itr+head);head++;
if(determineDigitBucket(kth_byte,v)!=i){
while(head<=tail){
tail--;
int w=*(begin_itr+tail);
if(determineDigitBucket(kth_byte,w)==i){
*(begin_itr+(head-1))=w;
*(begin_itr+tail)=v;
break;
}
}
}
}
}
}
gh[i]=tail;
}
}
#pragma omp barrier
#pragma omp single
{
SumCi=0;
for(int i=0;i<kRadixBin;i++){
SumCi+=(gt[i]-gh[i]);
}
}
#pragma omp barrier
}//end of while
}//end of omp2
if(kth_byte>0){
#pragma omp parallel num_threads(processes)
#pragma omp single
{
for(int i=0;i<kRadixBin;i++){
int nextStageThreads=1;
nextStageThreads=processes*(cnt[i]*(log(cnt[i])/log(kRadixBin))/(elenum*(log(elenum)/log(kRadixBin))));
if(cnt[i]>64LL){
#pragma omp task
PARADIS_core<(kth_byte > 0 ? (kth_byte - 1) : 0)>(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i]),begin_itr,std::max(nextStageThreads,1));
}else if(cnt[i]>1){
insert_sort_core_(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i]));
//std::sort(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i]));
}
}
#pragma omp taskwait
}
}
}
template<class RandomIt>
inline void PARADIS(RandomIt s,RandomIt t,int threadNum){
const size_t vsize=sizeof(typename std::iterator_traits<RandomIt>::value_type);
PARADIS_core<vsize-1>(s,t,s,threadNum);
}
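// Usage sketch (illustrative; assumes non-negative integer keys, matching the
// byte-wise digit extraction above):
//   std::vector<int> v = {5, 3, 8, 1};
//   PARADIS(v.begin(), v.end(), omp_get_max_threads());
//   // v is now {1, 3, 5, 8}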
|
GB_binop__gt_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint8)
// A*D function (colscale): GB (_AxD__gt_uint8)
// D*A function (rowscale): GB (_DxB__gt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint8)
// C=scalar+B GB (_bind1st__gt_uint8)
// C=scalar+B' GB (_bind1st_tran__gt_uint8)
// C=A+scalar GB (_bind2nd__gt_uint8)
// C=A'+scalar GB (_bind2nd_tran__gt_uint8)
// C type: bool
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint32
// op(A') function: GB_tran__identity_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
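// Expanded for this operator, GB_CAST_OP(p,p) performs (illustrative):
//   uint32_t aij = Ax [p] ;
//   double x = (double) aij ;
//   Cx [p] = x ;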
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_uint32
(
double *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/option-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
image->color_profile.length=clone_image->color_profile.length;
image->color_profile.info=clone_image->color_profile.info;
image->iptc_profile.length=clone_image->iptc_profile.length;
image->iptc_profile.info=clone_image->iptc_profile.info;
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image; otherwise it is added or applied. Use a name of '*' and a
% profile of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
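/*
  Usage sketch (illustrative only, not part of the original source): embed an
  ICC profile, then strip all profiles.

    StringInfo *icc= ... acquire profile bytes ... ;
    (void) ProfileImage(image,"icc",GetStringInfoDatum(icc),
      GetStringInfoLength(icc),MagickFalse);
    (void) ProfileImage(image,"*",(const void *) NULL,0,MagickFalse);
*/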
#if defined(MAGICKCORE_LCMS_DELEGATE)
typedef struct _LCMSInfo
{
ColorspaceType
colorspace;
cmsUInt32Number
type;
size_t
channels;
cmsHPROFILE
profile;
int
intent;
double
**magick_restrict pixels,
scale,
translate;
} LCMSInfo;
#if LCMS_VERSION < 2060
static void* cmsGetContextUserData(cmsContext ContextID)
{
return(ContextID);
}
static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
magick_unreferenced(Plugin);
return((cmsContext) UserData);
}
static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
cmsLogErrorHandlerFunction Fn)
{
magick_unreferenced(ContextID);
cmsSetLogErrorHandler(Fn);
}
static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
magick_unreferenced(ContextID);
}
#endif
static double **DestroyPixelThreadSet(double **pixels)
{
ssize_t
i;
if (pixels == (double **) NULL)
return((double **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (double *) NULL)
pixels[i]=(double *) RelinquishMagickMemory(pixels[i]);
pixels=(double **) RelinquishMagickMemory(pixels);
return(pixels);
}
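/*
  Per-thread scratch buffers: AcquirePixelThreadSet allocates one row of
  doubles (columns*channels) for each OpenMP worker, so the parallel color
  transform never shares pixel storage between threads.
*/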
static double **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
double
**pixels;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (double **) NULL)
return((double **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(double *) AcquireQuantumMemory(columns,channels*sizeof(**pixels));
if (pixels[i] == (double *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
const LCMSInfo *target_info,const cmsUInt32Number flags,
cmsContext cms_context)
{
cmsHTRANSFORM
*transform;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) memset(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
source_info->type,target_info->profile,target_info->type,
target_info->intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
Image
*image;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
image=(Image *) cmsGetContextUserData(context);
if (image != (Image *) NULL)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,
const MagickBooleanType magick_unused(clone))
{
#define GetLCMSPixel(source_info,pixel) \
(source_info.scale*QuantumScale*(pixel)+source_info.translate)
#define ProfileImageTag "Profile/Image"
#define SetLCMSPixel(target_info,pixel) \
ClampToQuantum(target_info.scale*QuantumRange*(pixel)+target_info.translate)
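/*
  A note on the two macros above: GetLCMSPixel() maps a quantum sample
  into the floating-point range the LCMS pixel types expect (e.g. 0..1
  for TYPE_RGB_DBL with scale 1.0, or 0..100 for TYPE_CMYK_DBL with
  scale 100.0), while SetLCMSPixel() applies the inverse mapping and
  clamps the result back into the quantum range.
*/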
#define ThrowProfileException(severity,tag,context) \
{ \
if (profile != (StringInfo *) NULL) \
profile=DestroyStringInfo(profile); \
if (cms_context != (cmsContext) NULL) \
cmsDeleteContext(cms_context); \
if (source_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_info.profile); \
if (target_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_info.profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
magick_unreferenced(clone);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add an ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace");
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image);
value=GetImageProperty(image,"exif:InteroperabilityIndex");
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image);
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
image->filename);
#else
{
cmsContext
cms_context;
LCMSInfo
source_info,
target_info;
/*
Transform pixel colors as defined by the color profiles.
*/
cms_context=cmsCreateContext(NULL,image);
if (cms_context == (cmsContext) NULL)
ThrowBinaryImageException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
cmsSetLogErrorHandlerTHR(cms_context,LCMSExceptionHandler);
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_info.profile == (cmsHPROFILE) NULL)
{
cmsDeleteContext(cms_context);
ThrowBinaryImageException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile);
else
{
CacheView
*image_view;
cmsColorSpaceSignature
signature;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags;
ExceptionInfo
*exception;
MagickOffsetType
progress;
ssize_t
y;
exception=(&image->exception);
target_info.profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_info.profile=source_info.profile;
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(icc_profile),(cmsUInt32Number)
GetStringInfoLength(icc_profile));
if (source_info.profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
source_info.scale=1.0;
source_info.translate=0.0;
source_info.colorspace=sRGBColorspace;
source_info.channels=3;
switch (cmsGetColorSpace(source_info.profile))
{
case cmsSigCmykData:
{
source_info.colorspace=CMYKColorspace;
source_info.channels=4;
source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
source_info.scale=100.0;
break;
}
case cmsSigGrayData:
{
source_info.colorspace=GRAYColorspace;
source_info.channels=1;
source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
source_info.colorspace=LabColorspace;
source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
source_info.scale=100.0;
source_info.translate=(-0.5);
break;
}
case cmsSigRgbData:
{
source_info.colorspace=sRGBColorspace;
source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
source_info.colorspace=XYZColorspace;
source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
signature=cmsGetPCS(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_info.profile);
target_info.scale=1.0;
target_info.translate=0.0;
target_info.channels=3;
switch (signature)
{
case cmsSigCmykData:
{
target_info.colorspace=CMYKColorspace;
target_info.channels=4;
target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
target_info.scale=0.01;
break;
}
case cmsSigGrayData:
{
target_info.colorspace=GRAYColorspace;
target_info.channels=1;
target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
target_info.colorspace=LabColorspace;
target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
target_info.scale=0.01;
target_info.translate=0.5;
break;
}
case cmsSigRgbData:
{
target_info.colorspace=sRGBColorspace;
target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
target_info.colorspace=XYZColorspace;
target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
switch (image->rendering_intent)
{
case AbsoluteIntent:
{
target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
break;
}
case PerceptualIntent:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
case RelativeIntent:
{
target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
break;
}
case SaturationIntent:
{
target_info.intent=INTENT_SATURATION;
break;
}
default:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(&source_info,&target_info,
flags,cms_context);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_info.pixels=AcquirePixelThreadSet(image->columns,
source_info.channels);
target_info.pixels=AcquirePixelThreadSet(image->columns,
target_info.channels);
if ((source_info.pixels == (double **) NULL) ||
(target_info.pixels == (double **) NULL))
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
profile=DestroyStringInfo(profile);
if (source_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
return(MagickFalse);
}
if (target_info.colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_info.colorspace);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
IndexPacket
*magick_restrict indexes;
double
*p;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p=source_info.pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=GetLCMSPixel(source_info,GetPixelRed(q));
if (source_info.channels > 1)
{
*p++=GetLCMSPixel(source_info,GetPixelGreen(q));
*p++=GetLCMSPixel(source_info,GetPixelBlue(q));
}
if (source_info.channels > 3)
{
*p=GetLCMSPixel(source_info,0);
if (indexes != (IndexPacket *) NULL)
*p=GetLCMSPixel(source_info,GetPixelIndex(indexes+x));
p++;
}
q++;
}
cmsDoTransform(transform[id],source_info.pixels[id],
target_info.pixels[id],(unsigned int) image->columns);
p=target_info.pixels[id];
q-=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,SetLCMSPixel(target_info,*p));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
p++;
if (target_info.channels > 1)
{
SetPixelGreen(q,SetLCMSPixel(target_info,*p));
p++;
SetPixelBlue(q,SetLCMSPixel(target_info,*p));
p++;
}
if (target_info.channels > 3)
{
if (indexes != (IndexPacket *) NULL)
SetPixelIndex(indexes+x,SetLCMSPixel(target_info,*p));
p++;
}
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ProfileImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_info.colorspace);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->matte == MagickFalse ? TrueColorType :
TrueColorMatteType;
break;
}
case cmsSigCmykData:
{
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
break;
}
case cmsSigGrayData:
{
image->type=image->matte == MagickFalse ? GrayscaleType :
GrayscaleMatteType;
break;
}
default:
break;
}
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
}
(void) cmsCloseProfile(source_info.profile);
cmsDeleteContext(cms_context);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
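/*
  A minimal usage sketch (assuming an already-acquired image): the
  returned StringInfo is detached from the image, so presumably the
  caller is responsible for destroying it.

    StringInfo *profile=RemoveImageProfile(image,"xmp");
    if (profile != (StringInfo *) NULL)
      profile=DestroyStringInfo(profile);
*/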
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
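/*
  A minimal iteration sketch, mirroring the profile-deletion loop in
  ProfileImage() above:

    const char *name;
    ResetImageProfileIterator(image);
    for (name=GetNextImageProfile(image); name != (const char *) NULL; )
    {
      const StringInfo *profile=GetImageProfile(image,name);
      (void) profile;
      name=GetNextImageProfile(image);
    }
*/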
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(unsigned int) (*p++) << 24;
*quantum|=(unsigned int) (*p++) << 16;
*quantum|=(unsigned int) (*p++) << 8;
*quantum|=(unsigned int) (*p++);
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++) << 8;
*quantum|=(unsigned short) (*p++);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) memcpy(p,buffer,4);
}
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
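/*
  Each Photoshop resource record parsed below is laid out as the 4-byte
  signature "8BIM", a 2-byte resource id, a Pascal-style name (a length
  byte plus the text, padded so the pair occupies an even number of
  bytes), a 4-byte data length, and the data itself, padded to an even
  length.
*/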
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_extent;
StringInfo
*extract_profile;
extract_extent=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) memcpy(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_extent=profile->length;
if ((extract_extent & 0x01) != 0)
extract_extent++;
extract_profile=AcquireStringInfo(offset+extract_extent+extent);
(void) memcpy(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
profile->length);
(void) memcpy(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) memcpy(extract_profile->datum+offset+extract_extent,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block)
{
const unsigned char
*datum;
const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
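/*
  The stored resolution is 16.16 fixed point, hence the division by
  65536.0 below.
*/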
if (count < 10)
break;
p=ReadResourceLong(p,&resolution);
image->x_resolution=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->y_resolution=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
#if defined(MAGICKCORE_XML_DELEGATE)
static MagickBooleanType ValidateXMPProfile(Image *image,
const StringInfo *profile)
{
xmlDocPtr
document;
/*
Parse XML profile.
*/
document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
XML_PARSE_NOWARNING);
if (document == (xmlDocPtr) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageWarning,"CorruptImageProfile","`%s' (XMP)",image->filename);
return(MagickFalse);
}
xmlFreeDoc(document);
return(MagickTrue);
}
#else
static MagickBooleanType ValidateXMPProfile(Image *image,
const StringInfo *profile)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","'%s' (XML)",
image->filename);
return(MagickFalse);
}
#endif
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive)
{
char
key[MaxTextExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((LocaleCompare(name,"xmp") == 0) &&
(ValidateXMPProfile(image,profile) == MagickFalse))
return(MagickTrue);
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MaxTextExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if ((status != MagickFalse) &&
((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
{
const StringInfo
*icc_profile;
/*
Continue to support deprecated color profile member.
*/
icc_profile=GetImageProfile(image,name);
if (icc_profile != (const StringInfo *) NULL)
{
image->color_profile.length=GetStringInfoLength(icc_profile);
image->color_profile.info=GetStringInfoDatum(icc_profile);
}
}
if ((status != MagickFalse) &&
((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
{
const StringInfo
*iptc_profile;
/*
Continue to support deprecated IPTC profile member.
*/
iptc_profile=GetImageProfile(image,name);
if (iptc_profile != (const StringInfo *) NULL)
{
image->iptc_profile.length=GetStringInfoLength(iptc_profile);
image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
}
}
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile);
else
if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse));
}
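/*
  A hedged usage sketch: attach a raw ICC blob without applying a CMS
  transform (contrast with ProfileImage() above). SetImageProfile()
  clones the profile, so the caller still destroys its copy;
  profile_bytes and profile_length are hypothetical caller-supplied
  values.

    StringInfo *profile=AcquireStringInfo(profile_length);
    SetStringInfoDatum(profile,profile_bytes);
    (void) SetImageProfile(image,"icc",profile);
    profile=DestroyStringInfo(profile);
*/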
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently only the 8BIM and EXIF resolution and the EXIF orientation are
% updated.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) buffer[1] << 8;
value|=(unsigned short) buffer[0];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
value=(unsigned short) buffer[0] << 8;
value|=(unsigned short) buffer[1];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) buffer[3] << 24;
value|=(unsigned int) buffer[2] << 16;
value|=(unsigned int) buffer[1] << 8;
value|=(unsigned int) buffer[0];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) memcpy(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) memcpy(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) memcpy(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) memcpy(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count >= (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
length-=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
image->x_resolution*2.54*65536.0),p);
else
WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
image->x_resolution*65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
image->y_resolution*2.54*65536.0),p+8);
else
WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
image->y_resolution*65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
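/*
  The TIFF header opens with a byte-order mark: 0x4949 ("II") means
  little-endian, 0x4D4D ("MM") means big-endian, and either is followed
  by the 16-bit magic number 0x002a.
*/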
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
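/*
  Each IFD entry is 12 bytes: a 2-byte tag, a 2-byte format code, a
  4-byte component count, and 4 bytes holding either the value itself
  (when it fits) or an offset to it.
*/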
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(int) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
|
multiphase_builder_and_solver.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on March 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: rrossi $
* Date: $Date: 2008-11-19 16:12:53 $
* Revision: $Revision: 1.10 $
*
* ***********************************************************/
#if !defined(KRATOS_MULTIPHASE_BUILDER_AND_SOLVER )
#define KRATOS_MULTIPHASE_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
#include "boost/smart_ptr.hpp"
#include "utilities/timer.h"
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enums */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
The current class provides an implementation for standard builder and solver operations.
The RHS is constituted by the unbalanced loads (residual).
Degrees of freedom are reordered by putting the restrained degrees of freedom at
the end of the system, ordered in reverse with respect to the DofSet.
Imposition of the Dirichlet conditions is naturally dealt with, as the residual already
contains this information.
Calculation of the reactions involves a cost very similar to the calculation of the
total residual.
*/
template < class TSparseSpace,
class TDenseSpace , //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class MultiPhaseBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( MultiPhaseBuilderAndSolver );
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef OpenMPUtils::PartitionVector PartitionVector;
typedef typename boost::numeric::ublas::matrix_row< TSystemMatrixType > RowType;
typedef boost::numeric::ublas::vector<int> IndexVector;
typedef std::size_t KeyType; // For Dof->GetVariable().Key()
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
MultiPhaseBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver )
: BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >( pNewLinearSystemSolver )
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor.
*/
virtual ~MultiPhaseBuilderAndSolver() {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b )
{
KRATOS_WATCH("in Build(), line 215")
KRATOS_TRY
if ( !pScheme )
KRATOS_THROW_ERROR( std::runtime_error, "No scheme provided!", "" );
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *( BaseType::mpReactionsVector ) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType( 0 );
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
//double StartTime = GetTickCount();
// ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// assemble all elements
KRATOS_WATCH("in Build(), line 243")
#ifndef _OPENMP
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
for ( typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it )
{
bool element_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
element_is_active = (*it)->Is(ACTIVE);
if ( element_is_active )
{
//calculate elemental contribution
pScheme->CalculateSystemContributions( *it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS( A, LHS_Contribution, EquationId );
AssembleRHS( b, RHS_Contribution, EquationId );
// clean local elemental memory
pScheme->CleanMemory( *it );
}
}
//double EndTime = GetTickCount();
//std::cout << "total time " << EndTime - StartTime << std::endl;
//std::cout << "writing in the system matrix " << ccc << std::endl;
//std::cout << "calculating the elemental contrib " << ddd << std::endl;
LHS_Contribution.resize( 0, 0, false );
RHS_Contribution.resize( 0, false );
// assemble all conditions
for ( typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it )
{
bool condition_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
condition_is_active = (*it)->Is(ACTIVE);
if ( condition_is_active )
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions( *it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS( A, LHS_Contribution, EquationId );
AssembleRHS( b, RHS_Contribution, EquationId );
}
}
#else
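// Shared-memory assembly sketch: one OpenMP lock guards each row of A,
// so threads assembling different elements may accumulate into the same
// global row without racing.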
std::vector< omp_lock_t > lock_array( A.size1() );
int A_size = A.size1();
KRATOS_WATCH("in Build(), line 290")
for ( int i = 0; i < A_size; i++ )
omp_init_lock( &lock_array[i] );
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition( number_of_threads, pElements.size(), element_partition );
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
KRATOS_WATCH("in Build(), line 306")
#pragma omp parallel for
for ( int k = 0; k < number_of_threads; k++ )
{
KRATOS_WATCH("in Build(), line 310")
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType( 0 );
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k+1];
KRATOS_WATCH("in Build(), line 321")
// assemble all elements
for ( typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
bool element_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
element_is_active = (*it)->Is(ACTIVE);
if ( element_is_active )
{
//calculate elemental contribution
pScheme->CalculateSystemContributions( *it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
Assemble( A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array );
// clean local elemental memory
pScheme->CleanMemory( *it );
}
}
}
KRATOS_WATCH("in Build(), line 337")
vector<unsigned int> condition_partition;
CreatePartition( number_of_threads, ConditionsArray.size(), condition_partition );
KRATOS_WATCH("in Build(), line 341")
#pragma omp parallel for
for ( int k = 0; k < number_of_threads; k++ )
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType( 0 );
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k+1];
// assemble all elements
for ( typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
bool condition_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
condition_is_active = (*it)->Is(ACTIVE);
if ( condition_is_active )
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions( *it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
Assemble( A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array );
}
}
}
KRATOS_WATCH("in Build(), line 371")
double stop_prod = omp_get_wtime();
for ( int i = 0; i < A_size; i++ )
omp_destroy_lock( &lock_array[i] );
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH( "finished parallel building" );
#endif
KRATOS_WATCH("in Build(), line 383")
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A )
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *( BaseType::mpReactionsVector ) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// assemble all elements
for ( typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it )
{
bool element_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
element_is_active = (*it)->Is(ACTIVE);
if ( element_is_active )
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution( *it, LHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS( A, LHS_Contribution, EquationId );
// clean local elemental memory
pScheme->CleanMemory( *it );
}
}
LHS_Contribution.resize( 0, 0, false );
// assemble all conditions
for ( typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it )
{
bool condition_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
condition_is_active = (*it)->Is(ACTIVE);
if ( condition_is_active )
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution( *it, LHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS( A, LHS_Contribution, EquationId );
}
}
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A )
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *( BaseType::mpReactionsVector ) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for ( typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it )
{
bool element_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
element_is_active = (*it)->Is(ACTIVE);
if ( element_is_active )
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution( *it, LHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows( A, LHS_Contribution, EquationId );
// clean local elemental memory
pScheme->CleanMemory( *it );
}
}
LHS_Contribution.resize( 0, 0, false );
// assemble all conditions
for ( typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it )
{
bool condition_is_active = true;
if( (*it)->IsDefined(ACTIVE) )
condition_is_active = (*it)->Is(ACTIVE);
if ( condition_is_active )
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution( *it, LHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows( A, LHS_Contribution, EquationId );
}
}
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
double norm_b;
if ( TSparseSpace::Size( b ) != 0 )
norm_b = TSparseSpace::TwoNorm( b );
else
norm_b = 0.00;
if ( norm_b != 0.00 )
BaseType::mpLinearSystemSolver->Solve( A, Dx, b );
else
TSparseSpace::SetToZero( Dx );
//prints information about the linear solver used
if ( this->GetEchoLevel() > 1 )
{
std::cout << *( BaseType::mpLinearSystemSolver ) << std::endl;
}
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
// boost::timer building_time;
Timer::Start( "Build" );
Build( pScheme, r_model_part, A, b );
Timer::Stop( "Build" );
// if(this->GetEchoLevel()>0)
// {
// std::cout << "Building Time : " << building_time.elapsed() << std::endl;
// }
// ApplyPointLoads(pScheme,r_model_part,b);
//does nothing: Dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions( pScheme, r_model_part, A, Dx, b );
if ( this->GetEchoLevel() == 3 )
{
std::cout << "before the solution of the system" << std::endl;
std::cout << "System Matrix = " << A << std::endl;
std::cout << "unknowns vector = " << Dx << std::endl;
std::cout << "RHS vector = " << b << std::endl;
}
// boost::timer solve_time;
Timer::Start( "Solve" );
SystemSolve( A, Dx, b );
Timer::Stop( "Solve" );
// if(this->GetEchoLevel()>0)
// {
// std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl;
// }
if ( this->GetEchoLevel() == 3 )
{
std::cout << "after the solution of the system" << std::endl;
std::cout << "System Matrix = " << A << std::endl;
std::cout << "unknowns vector = " << Dx << std::endl;
std::cout << "RHS vector = " << b << std::endl;
}
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
BuildRHS( pScheme, r_model_part, b );
SystemSolve( A, Dx, b );
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b )
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *( BaseType::mpReactionsVector ) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType( 0, 0 );
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType( 0 );
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for ( typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it )
{
//calculate elemental Right Hand Side Contribution
pScheme->Calculate_RHS_Contribution( *it, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleRHS( b, RHS_Contribution, EquationId );
}
LHS_Contribution.resize( 0, 0, false );
RHS_Contribution.resize( 0, false );
// assemble all conditions
for ( typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it )
{
//calculate elemental contribution
pScheme->Condition_Calculate_RHS_Contribution( *it, RHS_Contribution, EquationId, CurrentProcessInfo );
//assemble the elemental contribution
AssembleRHS( b, RHS_Contribution, EquationId );
}
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part
)
{
KRATOS_TRY
KRATOS_WATCH( "setting up the dofs" );
//Gets the array of elements from the model part
ElementsArrayType& pElements = r_model_part.Elements();
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
//mDofSet.clear();
//double StartTime = GetTickCount();
for ( typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it )
{
// gets list of Dof involved on every element
//aaa = GetTickCount();
pScheme->GetElementalDofList( *it, ElementalDofList, CurrentProcessInfo );
//bbb += GetTickCount() - aaa;
/*KRATOS_WATCH((*it)->Id());
std::cout << "node ids" << std::endl;
for(unsigned int i=0; i<((*it)->GetGeometry()).size(); i++)
std::cout << ((*it)->GetGeometry())[i].Id() << " ";
std::cout << std::endl;
for(unsigned int i=0; i<ElementalDofList.size(); i++)
std::cout << (ElementalDofList[i]->Id()) << " ";
std::cout << std::endl;*/
//KRATOS_WATCH(ElementalDofList);
//ccc = GetTickCount();
for ( typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i )
{
Doftemp.push_back( *i );
//mDofSet.push_back(*i);
}
//ddd += GetTickCount() - ccc;
}
//std::cout << "searching " << bbb << std::endl;
//std::cout << "inserting " << ddd << std::endl;
//taking conditions into account
ConditionsArrayType& pConditions = r_model_part.Conditions();
for ( typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it )
{
// gets list of Dof involved on every element
pScheme->GetConditionDofList( *it, ElementalDofList, CurrentProcessInfo );
//ccc = GetTickCount();
for ( typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i )
{
//mDofSet.push_back(*i);
Doftemp.push_back( *i );
}
//ddd += GetTickCount() - ccc;
}
//std::cout << "searching " << bbb << std::endl;
//std::cout << "inserting " << ddd << std::endl;
/*for (typename DofsArrayType::iterator dof_iterator = Doftemp.begin(); dof_iterator != Doftemp.end(); ++dof_iterator)
{
KRATOS_WATCH(*dof_iterator);
}
std::cout << "DofTemp before Unique" << Doftemp.size() << std::endl;
*/
//ccc = GetTickCount();
Doftemp.Unique();
//std::cout << "DofTemp after Unique" << Doftemp.size() << std::endl;
BaseType::mDofSet = Doftemp;
//ddd = GetTickCount() - ccc;
//std::cout << "Unique " << ddd << std::endl;
//throws an exception if there are no degrees of freedom involved in the analysis
if ( BaseType::mDofSet.size() == 0 )
KRATOS_THROW_ERROR( std::logic_error, "No degrees of freedom!", "" );
BaseType::mDofSetIsInitialized = true;
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void SetUpSystem(
ModelPart& r_model_part
)
{
// Set the equation id for each degree of freedom:
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed ones are at the end (in reverse order).
//
// That means that if the EquationId is greater than or equal to
// "mEquationSystemSize", the pointed degree of freedom is restrained.
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
//partitioning the equation ids by variable type
//first run: displacements
for ( typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator )
{
KeyType CurrVar = dof_iterator->GetVariable().Key();
if (( CurrVar == DISPLACEMENT_X ) || ( CurrVar == DISPLACEMENT_Y )
|| ( CurrVar == DISPLACEMENT_Z ) )
{
if ( dof_iterator->IsFixed() )
dof_iterator->SetEquationId( --fix_id );
else
dof_iterator->SetEquationId( free_id++ );
}
}
mDisplacementFreeEnd = free_id;
mDisplacementFixedEnd = fix_id;
mDisplacementFreeDofs = free_id;
KRATOS_WATCH( mDisplacementFreeEnd );
KRATOS_WATCH( mDisplacementFixedEnd );
KRATOS_WATCH( mDisplacementFreeDofs );
//second run: water pressures
for ( typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator )
{
KeyType CurrVar = dof_iterator->GetVariable().Key();
if (( CurrVar == WATER_PRESSURE ) )
{
if ( dof_iterator->IsFixed() )
dof_iterator->SetEquationId( --fix_id );
else
dof_iterator->SetEquationId( free_id++ );
}
}
mWaterPressureFreeEnd = free_id;
mWaterPressureFixedEnd = fix_id;
mWaterPressureFreeDofs = free_id - mDisplacementFreeDofs;
KRATOS_WATCH( mWaterPressureFreeEnd );
KRATOS_WATCH( mWaterPressureFixedEnd );
KRATOS_WATCH( mWaterPressureFreeDofs );
//third run: air pressures
for ( typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator )
{
KeyType CurrVar = dof_iterator->GetVariable().Key();
if (( CurrVar == AIR_PRESSURE ) )
{
if ( dof_iterator->IsFixed() )
dof_iterator->SetEquationId( --fix_id );
else
dof_iterator->SetEquationId( free_id++ );
}
}
mAirPressureFreeEnd = free_id;
mAirPressureFixedEnd = fix_id;
mAirPressureFreeDofs = free_id - mWaterPressureFreeDofs - mDisplacementFreeDofs;
KRATOS_WATCH( mAirPressureFreeEnd );
KRATOS_WATCH( mAirPressureFixedEnd );
KRATOS_WATCH( mAirPressureFreeDofs );
BaseType::mEquationSystemSize = fix_id;
}
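//--------------------------------------------------------------------------
// Minimal sketch (not called anywhere) of the two-pointer numbering used in
// SetUpSystem, reduced to plain arrays: free dofs are numbered upwards from
// 0, fixed dofs downwards from the total count, so every id below the final
// free_id belongs to the solved system and every id at or above it is
// restrained. 'is_fixed', 'n' and 'equation_id' are hypothetical inputs.
static void EquationIdNumberingSketch( const int* is_fixed, int n, int* equation_id )
{
int free_id = 0;
int fix_id = n;
for ( int i = 0; i < n; i++ )
{
if ( is_fixed[i] )
equation_id[i] = --fix_id; // fixed: n-1, n-2, ... (reverse order)
else
equation_id[i] = free_id++; // free: 0, 1, 2, ...
}
// on exit free_id == fix_id == number of free dofs (mEquationSystemSize here)
}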
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
)
{
KRATOS_WATCH("in ResizeAndInitializeVectors, line 885");
KRATOS_TRY
if ( pA == NULL ) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType( new TSystemMatrixType( 0, 0 ) );
pA.swap( pNewA );
}
if ( pDx == NULL ) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType( new TSystemVectorType( 0 ) );
pDx.swap( pNewDx );
}
if ( pb == NULL ) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType( new TSystemVectorType( 0 ) );
pb.swap( pNewb );
}
if ( BaseType::mpReactionsVector == NULL ) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType( new TSystemVectorType( 0 ) );
BaseType::mpReactionsVector.swap( pNewReactionsVector );
}
// Member Matrices
if ( mpKuu == NULL )
{
TSystemMatrixPointerType pNewKuu( new TSystemMatrixType( 0, 0 ) );
mpKuu.swap( pNewKuu );
}
if ( mpKuw == NULL )
{
TSystemMatrixPointerType pNewKuw( new TSystemMatrixType( 0, 0 ) );
mpKuw.swap( pNewKuw );
}
if ( mpKua == NULL )
{
TSystemMatrixPointerType pNewKua( new TSystemMatrixType( 0, 0 ) );
mpKua.swap( pNewKua );
}
if ( mpKwu == NULL )
{
TSystemMatrixPointerType pNewKwu( new TSystemMatrixType( 0, 0 ) );
mpKwu.swap( pNewKwu );
}
if ( mpKww == NULL )
{
TSystemMatrixPointerType pNewKww( new TSystemMatrixType( 0, 0 ) );
mpKww.swap( pNewKww );
}
if ( mpKwa == NULL )
{
TSystemMatrixPointerType pNewKwa( new TSystemMatrixType( 0, 0 ) );
mpKwa.swap( pNewKwa );
}
if ( mpKau == NULL )
{
TSystemMatrixPointerType pNewKau( new TSystemMatrixType( 0, 0 ) );
mpKau.swap( pNewKau );
}
if ( mpKaw == NULL )
{
TSystemMatrixPointerType pNewKaw( new TSystemMatrixType( 0, 0 ) );
mpKaw.swap( pNewKaw );
}
if ( mpKaa == NULL )
{
TSystemMatrixPointerType pNewKaa( new TSystemMatrixType( 0, 0 ) );
mpKaa.swap( pNewKaa );
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
TSystemMatrixType& Kuu = *mpKuu;
TSystemMatrixType& Kuw = *mpKuw;
TSystemMatrixType& Kua = *mpKua;
TSystemMatrixType& Kwu = *mpKwu;
TSystemMatrixType& Kww = *mpKww;
TSystemMatrixType& Kwa = *mpKwa;
TSystemMatrixType& Kau = *mpKau;
TSystemMatrixType& Kaw = *mpKaw;
TSystemMatrixType& Kaa = *mpKaa;
//resizing the system vectors and matrix
if ( BaseType::GetReshapeMatrixFlag() == true || // if we must remesh
// mDofSetChanged == true || // if the dof set has changed
Kuu.size1() == 0 || Kuw.size1() == 0 || Kua.size1() == 0 ||
Kwu.size1() == 0 || Kww.size1() == 0 || Kwa.size1() == 0 ||
Kau.size1() == 0 || Kaw.size1() == 0 || Kaa.size1() == 0 ||
A.size1() == 0 ) //if the matrices are not initialized
{
Kuu.resize( mDisplacementFreeDofs, mDisplacementFreeDofs, false );
Kuw.resize( mDisplacementFreeDofs, mWaterPressureFreeDofs, false );
Kua.resize( mDisplacementFreeDofs, mAirPressureFreeDofs, false );
Kwu.resize( mWaterPressureFreeDofs, mDisplacementFreeDofs, false );
Kww.resize( mWaterPressureFreeDofs, mWaterPressureFreeDofs, false );
Kwa.resize( mWaterPressureFreeDofs, mAirPressureFreeDofs, false );
Kau.resize( mAirPressureFreeDofs, mDisplacementFreeDofs, false );
Kaw.resize( mAirPressureFreeDofs, mWaterPressureFreeDofs, false );
Kaa.resize( mAirPressureFreeDofs, mAirPressureFreeDofs, false );
ConstructMatrixStructure(pScheme, Kuu, Kuw, Kua, Kwu, Kww, Kwa, Kau, Kaw, Kaa, rModelPart.Elements(), rModelPart.Conditions(), rModelPart.GetProcessInfo() );
// the global graph of A is assembled from the submatrix graphs in
// ConstructSystemMatrix below; note that A must be sized before any
// structure can be built, since the graph routines use A.size1()
A.resize( BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false );
AllocateSystemMatrix( A );
ConstructSystemMatrix( A );
}
else
{
// I do the check only for A, as the remaining matrices are private.
// They are managed by this class, so they shouldn't change size spontaneously
if ( A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize )
{
KRATOS_WATCH( "it should not come here!!!!!!!! ... this is SLOW" );
A.resize( BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false );
AllocateSystemMatrix( A );
ConstructSystemMatrix( A );
}
}
if ( Dx.size() != BaseType::mEquationSystemSize )
Dx.resize( BaseType::mEquationSystemSize, false );
if ( b.size() != BaseType::mEquationSystemSize )
b.resize( BaseType::mEquationSystemSize, false );
//if needed resize the vector for the calculation of reactions
if ( BaseType::mCalculateReactionsFlag == true )
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
if ( BaseType::mpReactionsVector->size() != ReactionsVectorSize )
BaseType::mpReactionsVector->resize( ReactionsVectorSize, false );
}
KRATOS_CATCH( "" )
KRATOS_WATCH("in ResizeAndInitializeVectors, line 1039");
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
KRATOS_CATCH( "" )
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
//refresh RHS to have the correct reactions
BuildRHS( pScheme, r_model_part, b );
int i;
int systemsize = BaseType::mDofSet.size() - TSparseSpace::Size( *BaseType::mpReactionsVector );
typename DofsArrayType::ptr_iterator it2;
//updating variables
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for ( it2 = BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2 )
{
if (( *it2 )->IsFixed() )
{
i = ( *it2 )->EquationId();
i -= systemsize;
( *it2 )->GetSolutionStepReactionValue() = ReactionsVector[i];
}
}
}
//**************************************************************************
//**************************************************************************
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{}
//**************************************************************************
//**************************************************************************
void ApplyPointLoads(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b )
{}
/**
this function is intended to be called at the end of the solution step, to
clean up memory storage that is no longer needed
*/
void Clear()
{
this->mDofSet = DofsArrayType();
if ( this->mpReactionsVector != NULL )
TSparseSpace::Clear(( this->mpReactionsVector ) );
// this->mReactionsVector = TSystemVectorType();
if ( this->GetEchoLevel() > 0 )
{
KRATOS_WATCH( "MultiPhaseBuilderAndSolver Clear Function called" );
}
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ElementsContainerType& rElements,
ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo )
{
std::size_t equation_size = A.size1();
std::vector<std::vector<std::size_t> > indices( equation_size );
// std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
Element::EquationIdVectorType ids( 3, 0 );
for ( typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++ )
{
pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
for ( std::size_t i = 0 ; i < ids.size() ; i++ )
if ( ids[i] < equation_size )
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for ( std::size_t j = 0 ; j < ids.size() ; j++ )
if ( ids[j] < equation_size )
{
AddUnique( row_indices, ids[j] );
//indices[ids[i]].push_back(ids[j]);
}
}
}
for ( typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++ )
{
pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
for ( std::size_t i = 0 ; i < ids.size() ; i++ )
if ( ids[i] < equation_size )
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for ( std::size_t j = 0 ; j < ids.size() ; j++ )
if ( ids[j] < equation_size )
{
AddUnique( row_indices, ids[j] );
// indices[ids[i]].push_back(ids[j]);
}
}
}
//allocating the memory needed
int data_size = 0;
for ( std::size_t i = 0 ; i < indices.size() ; i++ )
{
data_size += indices[i].size();
}
A.reserve( data_size, false );
//filling with zero the matrix (creating the structure)
Timer::Start( "MatrixStructure" );
#ifndef _OPENMP
for ( std::size_t i = 0 ; i < indices.size() ; i++ )
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort( row_indices.begin(), row_indices.end() );
for ( std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; it++ )
{
A.push_back( i, *it, 0.00 );
}
row_indices.clear();
}
#else
int number_of_threads = omp_get_max_threads();
vector<unsigned int> matrix_partition;
CreatePartition( number_of_threads, indices.size(), matrix_partition );
KRATOS_WATCH( matrix_partition );
for ( int k = 0; k < number_of_threads; k++ )
{
#pragma omp parallel
if ( omp_get_thread_num() == k )
{
for ( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ )
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort( row_indices.begin(), row_indices.end() );
for ( std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; it++ )
{
A.push_back( i, *it, 0.00 );
}
row_indices.clear();
}
}
}
#endif
Timer::Stop( "MatrixStructure" );
}
/// Compute graphs for the different matrices involved in the problem
virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme,
TSystemMatrixType& Kuu, TSystemMatrixType& Kuw, TSystemMatrixType& Kua,
TSystemMatrixType& Kwu, TSystemMatrixType& Kww, TSystemMatrixType& Kwa,
TSystemMatrixType& Kau, TSystemMatrixType& Kaw, TSystemMatrixType& Kaa,
const ElementsContainerType& rElements,
const ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo )
{
std::vector< std::vector<std::size_t> > indicesKuu( mDisplacementFreeDofs );
std::vector< std::vector<std::size_t> > indicesKuw( mDisplacementFreeDofs );
std::vector< std::vector<std::size_t> > indicesKua( mDisplacementFreeDofs );
std::vector< std::vector<std::size_t> > indicesKwu( mWaterPressureFreeDofs );
std::vector< std::vector<std::size_t> > indicesKww( mWaterPressureFreeDofs );
std::vector< std::vector<std::size_t> > indicesKwa( mWaterPressureFreeDofs );
std::vector< std::vector<std::size_t> > indicesKau( mAirPressureFreeDofs );
std::vector< std::vector<std::size_t> > indicesKaw( mAirPressureFreeDofs );
std::vector< std::vector<std::size_t> > indicesKaa( mAirPressureFreeDofs );
Element::EquationIdVectorType ids;
ids.reserve( 120 ); // 120 as initial capacity, assuming 5 DOFs per node for a hex20 element
// Identify and collect the indices of non-zero terms in each matrix
for ( typename ElementsContainerType::const_iterator itElem = rElements.begin();
itElem != rElements.end(); itElem++ )
{
pScheme->EquationId( *(itElem.base()) , ids, CurrentProcessInfo);
for ( std::size_t i = 0; i < ids.size(); i++ )
{
if ( ids[i] < mDisplacementFreeDofs )
{
std::vector<std::size_t>& RowKuu = indicesKuu[ids[i]];
std::vector<std::size_t>& RowKuw = indicesKuw[ids[i]];
std::vector<std::size_t>& RowKua = indicesKua[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKuu, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKuw, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKua, ids[j] );
}
}
else if ( ids[i] < mWaterPressureFreeDofs )
{
std::vector<std::size_t>& RowKwu = indicesKwu[ids[i]];
std::vector<std::size_t>& RowKww = indicesKww[ids[i]];
std::vector<std::size_t>& RowKwa = indicesKwa[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKwu, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKww, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKwa, ids[j] );
}
}
else if ( ids[i] < mAirPressureFreeDofs )
{
std::vector<std::size_t>& RowKau = indicesKau[ids[i]];
std::vector<std::size_t>& RowKaw = indicesKaw[ids[i]];
std::vector<std::size_t>& RowKaa = indicesKaa[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKau, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKaw, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKaa, ids[j] );
}
}
}
}
// Do the same for conditions
for ( typename ConditionsArrayType::const_iterator itCond = rConditions.begin();
itCond != rConditions.end(); itCond++ )
{
pScheme->Condition_EquationId( *(itCond.base()), ids, CurrentProcessInfo);
for ( std::size_t i = 0; i < ids.size(); i++ )
{
if ( ids[i] < mDisplacementFreeDofs )
{
std::vector<std::size_t>& RowKuu = indicesKuu[ids[i]];
std::vector<std::size_t>& RowKuw = indicesKuw[ids[i]];
std::vector<std::size_t>& RowKua = indicesKua[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKuu, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKuw, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKua, ids[j] );
}
}
else if ( ids[i] < mWaterPressureFreeDofs )
{
std::vector<std::size_t>& RowKwu = indicesKwu[ids[i]];
std::vector<std::size_t>& RowKww = indicesKww[ids[i]];
std::vector<std::size_t>& RowKwa = indicesKwa[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKwu, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKww, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKwa, ids[j] );
}
}
else if ( ids[i] < mAirPressureFreeDofs )
{
std::vector<std::size_t>& RowKau = indicesKau[ids[i]];
std::vector<std::size_t>& RowKaw = indicesKaw[ids[i]];
std::vector<std::size_t>& RowKaa = indicesKaa[ids[i]];
for ( std::size_t j = 0; j < ids.size(); j++ )
{
if ( ids[j] < mDisplacementFreeDofs )
AddUnique( RowKau, ids[j] );
else if ( ids[j] < mWaterPressureFreeDofs )
AddUnique( RowKaw, ids[j] );
else if ( ids[j] < mAirPressureFreeDofs )
AddUnique( RowKaa, ids[j] );
}
}
}
}
// Allocate memory and initialize matrices with zeros
int NumTermsKuu = 0; // Counters for non-zero terms
int NumTermsKuw = 0;
int NumTermsKua = 0;
int NumTermsKwu = 0;
int NumTermsKww = 0;
int NumTermsKwa = 0;
int NumTermsKau = 0;
int NumTermsKaw = 0;
int NumTermsKaa = 0;
for ( std::size_t i = 0; i < indicesKuu.size(); i++ )
NumTermsKuu += indicesKuu[i].size();
Kuu.reserve( NumTermsKuu, false );
for ( std::size_t i = 0; i < indicesKuw.size(); i++ )
NumTermsKuw += indicesKuw[i].size();
Kuw.reserve( NumTermsKuw, false );
for ( std::size_t i = 0; i < indicesKua.size(); i++ )
NumTermsKua += indicesKua[i].size();
Kua.reserve( NumTermsKua, false );
for ( std::size_t i = 0; i < indicesKwu.size(); i++ )
NumTermsKwu += indicesKwu[i].size();
Kwu.reserve( NumTermsKwu, false );
for ( std::size_t i = 0; i < indicesKww.size(); i++ )
NumTermsKww += indicesKww[i].size();
Kww.reserve( NumTermsKww, false );
for ( std::size_t i = 0; i < indicesKwa.size(); i++ )
NumTermsKwa += indicesKwa[i].size();
Kwa.reserve( NumTermsKwa, false );
for ( std::size_t i = 0; i < indicesKau.size(); i++ )
NumTermsKau += indicesKau[i].size();
Kau.reserve( NumTermsKau, false );
for ( std::size_t i = 0; i < indicesKaw.size(); i++ )
NumTermsKaw += indicesKaw[i].size();
Kaw.reserve( NumTermsKaw, false );
for ( std::size_t i = 0; i < indicesKaa.size(); i++ )
NumTermsKaa += indicesKaa[i].size();
Kaa.reserve( NumTermsKaa, false );
// Create the matrix structure, filling it with zeros
AllocateSpace( Kuu, indicesKuu );
AllocateSpace( Kuw, indicesKuw );
AllocateSpace( Kua, indicesKua );
AllocateSpace( Kwu, indicesKwu );
AllocateSpace( Kww, indicesKww );
AllocateSpace( Kwa, indicesKwa );
AllocateSpace( Kau, indicesKau );
AllocateSpace( Kaw, indicesKaw );
AllocateSpace( Kaa, indicesKaa );
}
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for ( unsigned int i_local = 0; i_local < local_size; i_local++ )
{
unsigned int i_global = EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for ( unsigned int j_local = 0; j_local < local_size; j_local++ )
{
unsigned int j_global = EquationId[j_local];
if ( j_global < BaseType::mEquationSystemSize )
A( i_global, j_global ) += LHS_Contribution( i_local, j_local );
}
}
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if ( BaseType::mCalculateReactionsFlag == false ) //if we don't need to calculate reactions
{
for ( unsigned int i_local = 0; i_local < local_size; i_local++ )
{
unsigned int i_global = EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{
// ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
}
}
else //when the calculation of reactions is needed
{
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for ( unsigned int i_local = 0; i_local < local_size; i_local++ )
{
unsigned int i_global = EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{
// ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
else //on "fixed" DOFs
{
// Assembling the Vector of REACTIONS
ReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
}
}
}
}
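//--------------------------------------------------------------------------
// Minimal sketch (hypothetical plain-array form, not called anywhere) of the
// scatter convention shared by AssembleRHS and CalculateReactions: ids below
// system_size are free equations assembled into b, ids at or above it are
// fixed and map to slot id - system_size of the reactions vector.
static void RhsScatterSketch( double* b, double* reactions, unsigned int system_size,
const unsigned int* ids, const double* rhs_local,
unsigned int local_size )
{
for ( unsigned int i = 0; i < local_size; i++ )
{
if ( ids[i] < system_size )
b[ids[i]] += rhs_local[i]; // free dof: system right hand side
else
reactions[ids[i] - system_size] -= rhs_local[i]; // fixed dof: reaction
}
}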
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
unsigned int mDisplacementFreeEnd;
unsigned int mDisplacementFixedEnd;
unsigned int mWaterPressureFreeEnd;
unsigned int mWaterPressureFixedEnd;
unsigned int mAirPressureFreeEnd;
unsigned int mAirPressureFixedEnd;
unsigned int mDisplacementFreeDofs;
unsigned int mWaterPressureFreeDofs;
unsigned int mAirPressureFreeDofs;
/**
* Three-phase coupled system matrix:
* | Kuu Kuw Kua |
* | Kwu Kww Kwa |
* | Kau Kaw Kaa |
**/
TSystemMatrixPointerType mpKuu;
TSystemMatrixPointerType mpKuw;
TSystemMatrixPointerType mpKua;
TSystemMatrixPointerType mpKwu;
TSystemMatrixPointerType mpKww;
TSystemMatrixPointerType mpKwa;
TSystemMatrixPointerType mpKau;
TSystemMatrixPointerType mpKaw;
TSystemMatrixPointerType mpKaa;
/// Flag for matrix reconstruction
bool mDofSetChanged;
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for ( unsigned int i_local = 0; i_local < local_size; i_local++ )
{
unsigned int i_global = EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for ( unsigned int j_local = 0; j_local < local_size; j_local++ )
{
int j_global = EquationId[j_local];
A( i_global, j_global ) += LHS_Contribution( i_local, j_local );
}
}
}
}
//******************************************************************************************
//******************************************************************************************
inline void AddUnique( std::vector<std::size_t>& v, const std::size_t& candidate )
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while ( i != endit && ( *i ) != candidate )
{
i++;
}
if ( i == endit )
{
v.push_back( candidate );
}
}
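// Note: AddUnique is a linear scan, so building a row with k distinct
// couplings costs O(k^2) comparisons; rows of a finite element graph are
// short (tens of entries), which is why this simple unsorted scheme is
// adequate here and the sorting is deferred to AllocateSpace.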
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition( unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions )
{
partitions.resize( number_of_threads + 1 );
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for ( unsigned int i = 1; i < number_of_threads; i++ )
partitions[i] = partitions[i-1] + partition_size ;
}
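// Worked example (hypothetical sizes): CreatePartition( 3, 10, p ) gives
// p = {0, 3, 6, 10}; thread k then owns rows [ p[k], p[k+1] ), and the last
// partition absorbs the remainder rows that the integer division leaves over.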
#ifdef _OPENMP
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
std::vector< omp_lock_t >& lock_array
)
{
KRATOS_WATCH("in Assemble, line 1745");
unsigned int local_size = LHS_Contribution.size1();
for ( unsigned int i_local = 0; i_local < local_size; i_local++ )
{
unsigned int i_global = EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
omp_set_lock( &lock_array[i_global] );
KRATOS_WATCH("in Assemble, line 1755");
b[i_global] += RHS_Contribution( i_local );
for ( unsigned int j_local = 0; j_local < local_size; j_local++ )
{
unsigned int j_global = EquationId[j_local];
if ( j_global < BaseType::mEquationSystemSize )
{
KRATOS_WATCH("in Assemble, line 1764");
A( i_global, j_global ) += LHS_Contribution( i_local, j_local );
}
}
omp_unset_lock( &lock_array[i_global] );
}
}
KRATOS_WATCH("in Assemble, line 1773");
}
#endif
void AllocateSpace( TSystemMatrixType& A, std::vector< std::vector<std::size_t> >& indices )
{
int NumThreads = OpenMPUtils::GetNumThreads();
PartitionVector MatrixPartition;
OpenMPUtils::DivideInPartitions( indices.size(), NumThreads, MatrixPartition );
for ( int k = 0; k < NumThreads; k++ )
{
// First touch: Make the thread that will manipulate each partition
// be the one that initializes it, so the relevant variables will
// belong to it.
#pragma omp parallel
if ( OpenMPUtils::ThisThread() == k )
{
for ( int i = MatrixPartition[k]; i < MatrixPartition[k+1]; i++ )
{
std::vector<std::size_t>& Row = indices[i];
std::sort( Row.begin(), Row.end() );
for ( std::vector<std::size_t>::iterator it = Row.begin(); it != Row.end() ; it++ )
{
A.push_back( i, *it, 0.00 );
}
Row.clear();
}
}
}
}
/// Identify non-zero terms in the system matrix
void ConstructSystemMatrix( TSystemMatrixType& A )
{
// Retrieve matrices
TSystemMatrixType& rKuu = *mpKuu;
TSystemMatrixType& rKuw = *mpKuw;
TSystemMatrixType& rKua = *mpKua;
TSystemMatrixType& rKwu = *mpKwu;
TSystemMatrixType& rKww = *mpKww;
TSystemMatrixType& rKwa = *mpKwa;
TSystemMatrixType& rKau = *mpKau;
TSystemMatrixType& rKaw = *mpKaw;
TSystemMatrixType& rKaa = *mpKaa;
PartitionVector Partition;
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions( A.size1(), NumThreads, Partition );
for ( int k = 0 ; k < NumThreads ; k++ )
{
// This code is serial, the pragma is here to ensure that each
// row block is assigned to the processor that will fill it
#pragma omp parallel
if ( OpenMPUtils::ThisThread() == k )
{
IndexVector Next = IndexVector( BaseType::mEquationSystemSize ) ;
//IndexVector& Next = *pNext; // Keeps track of which columns were filled
for ( unsigned int m = 0; m < BaseType::mEquationSystemSize; m++ ) Next[m] = -1;
std::size_t NumTerms = 0; // Full positions in a row
std::vector<unsigned int> UsedCols = std::vector<unsigned int>();
//std::vector<unsigned int>& UsedCols = *pUsedCols;
UsedCols.reserve( BaseType::mEquationSystemSize );
for ( int RowIndex = Partition[k] ;
RowIndex != Partition[k+1] ; RowIndex++ )
{
RowType RowKuu( rKuu, RowIndex );
RowType RowKuw( rKuw, RowIndex );
RowType RowKua( rKua, RowIndex );
RowType RowKwu( rKwu, RowIndex );
RowType RowKww( rKww, RowIndex );
RowType RowKwa( rKwa, RowIndex );
RowType RowKau( rKau, RowIndex );
RowType RowKaw( rKaw, RowIndex );
RowType RowKaa( rKaa, RowIndex );
int head = -2;
std::size_t Length = 0;
// Terms filled by Kuu
for ( typename RowType::iterator ItKuu = RowKuu.begin(); ItKuu != RowKuu.end(); ItKuu++ )
{
if ( Next[ItKuu.index()] == -1 )
{
Next[ItKuu.index()] = head;
head = ItKuu.index();
Length++;
}
}
/*
// Additional terms due to D*Inv(Diag(S))*G
for ( typename RowType::iterator ItD = RowD.begin();
ItD != RowD.end(); ItD++ )
{
RowType RowG( rG, ItD.index() );
for ( typename RowType::iterator ItG = RowG.begin();
ItG != RowG.end(); ItG++ )
{
if ( Next[ItG.index()] == -1 )
{
Next[ItG.index()] = head;
head = ItG.index();
Length++;
}
}
}
*/
// Identify full terms for ordering: harvest the linked list built
// above and reset 'Next' for the following row
for ( std::size_t i = 0; i < Length; i++ )
{
if ( Next[head] != -1 )
{
UsedCols.push_back( head );
NumTerms++;
}
int temp = head;
head = Next[head];
// Clear 'Next' for next iteration
Next[temp] = -1;
}
// Sort Column indices
SortCols( UsedCols, NumTerms );
// Store row in matrix, clean temporary variables
for ( unsigned int i = 0; i < NumTerms; i++ )
{
A.push_back( RowIndex, UsedCols[i], 0 );
}
NumTerms = 0;
UsedCols.clear();
}
}
}
}
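//--------------------------------------------------------------------------
// Minimal sketch (not called anywhere) of the 'Next' linked-list trick used
// in ConstructSystemMatrix: visited columns of the current row are chained
// through Next[] (head sentinel -2, unvisited marker -1), giving O(1)
// duplicate detection and an O(row length) reset, with no per-row allocation.
static int ChainColumnSketch( int* Next, int head, int col )
{
if ( Next[col] == -1 ) // column not yet in this row's chain
{
Next[col] = head; // link the old head behind it
head = col;
}
return head; // caller keeps the chain head
}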
#ifndef _OPENMP
void AllocateSystemMatrix( TSystemMatrixType& A )
{
/* All non-zero positions in A = L - D*Inv(Diag(S))*G have to be stored.
* This method allocates the required memory based on the shapes of
* member matrices mpD (Divergence operator), mpG (Gradient Operator)
* and mpL (stabilization term)
*/
TSystemMatrixType& rG = *mpG;
TSystemMatrixType& rD = *mpD;
TSystemMatrixType& rL = *mpL;
std::size_t NumTerms = 0;
std::vector<int> mask( rL.size2(), -1 );
// Keeps track of used cols in a given row.
// When a col is used, mask[col] is filled with row num.
for ( OuterIt RowD = rD.begin1(), RowL = rL.begin1() ;
RowD != rD.end1();
RowD++, RowL++ )
{
// Find terms filled by the matrix product
for ( InnerIt ItD = RowD.begin(); ItD != RowD.end() ; ItD++ )
{
RowType RowG( rG, ItD.index2() );
for ( typename RowType::iterator ItG = RowG.begin(); ItG != RowG.end(); ItG++ )
{
if ( mask[ItG.index()] != int ( ItD.index1() ) )
// Cast to int to avoid a compilation warning, as index1() returns an unsigned int
{
mask[ItG.index()] = ItD.index1();
NumTerms++;
}
}
}
// Find extra terms introduced by matrix difference
for ( InnerIt ItL = RowL.begin(); ItL != RowL.end(); ItL++ )
{
if ( mask[ItL.index2()] != int ( ItL.index1() ) )
// Cast to int to avoid a compilation warning, as index1() returns an unsigned int
{
mask[ItL.index2()] = ItL.index1();
NumTerms++;
}
}
}
A.reserve( NumTerms );
}
#else
// we can't allocate in parallel!!
void AllocateSystemMatrix( TSystemMatrixType& A )
{}
#endif
/// Helper function for System matrix functions
void SortCols(
std::vector<unsigned int>& ColList,
std::size_t& NumCols )
{
bool swap = true;
unsigned int d = NumCols;
int temp;
while ( swap || d > 1 )
{
swap = false;
d = ( d + 1 ) / 2;
for ( unsigned int i = 0; i < ( NumCols - d ); i++ )
if ( ColList[i+d] < ColList[i] )
{
temp = ColList[i+d];
ColList[i+d] = ColList[i];
ColList[i] = temp;
swap = true;
}
}
}
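// SortCols is a comb sort with shrink factor 2: the gap d halves each pass
// via (d+1)/2 until it reaches 1, after which it degenerates to bubble sort
// until a pass makes no swaps. Worked example on {5, 1, 4, 2}:
// d=2 compares (5,4),(1,2) -> {4, 1, 5, 2}; the following d=1 passes
// yield {1, 4, 2, 5}, then {1, 2, 4, 5}.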
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class MultiPhaseBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_MULTIPHASE_BUILDER_AND_SOLVER defined */
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
#include <omp.h>
static void evolve(dcomplex u0[128][256][256],dcomplex u1[128][256][256],int t,int indexmap[128][256][256],int d[3]);
static void compute_initial_conditions(dcomplex u0[128][256][256],int d[3]);
static void ipow46(double a,int exponent,double *result);
static void setup();
static void compute_indexmap(int indexmap[128][256][256],int d[3]);
static void print_timers();
static void fft(int dir,dcomplex x1[128][256][256],dcomplex x2[128][256][256]);
static void cffts1(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18]);
static void cffts2(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18]);
static void cffts3(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18]);
static void fft_init(int n);
static void cfftz(int is,int m,int n,dcomplex x[256][18],dcomplex y[256][18]);
static void fftz2(int is,int l,int m,int n,int ny,int ny1,dcomplex u[256],dcomplex x[256][18],dcomplex y[256][18]);
static int ilog2(int n);
static void checksum(int i,dcomplex u1[128][256][256],int d[3]);
static void verify(int d1,int d2,int d3,int nt,boolean *verified,char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
int main(int argc,char **argv)
{
/*c-------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i;
int ierr;
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accommodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
static dcomplex u0[128][256][256];
static dcomplex pad1[3];
static dcomplex u1[128][256][256];
static dcomplex pad2[3];
static dcomplex u2[128][256][256];
static dcomplex pad3[3];
static int indexmap[128][256][256];
int iter;
int nthreads = 1;
double total_time;
double mflops;
boolean verified;
char class;
/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
for (i = 0; i <= 6; i += 1) {
timer_clear(i);
}
setup();
compute_indexmap(indexmap,dims[2]);
compute_initial_conditions(u1,dims[0]);
fft_init(dims[0][0]);
fft(1,u1,u0);
/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
for (i = 0; i <= 6; i += 1) {
timer_clear(i);
}
timer_start(0);
if (0 == 1)
timer_start(1);
compute_indexmap(indexmap,dims[2]);
compute_initial_conditions(u1,dims[0]);
fft_init(dims[0][0]);
if (0 == 1) {
timer_stop(1);
}
if (0 == 1) {
timer_start(2);
}
fft(1,u1,u0);
if (0 == 1) {
timer_stop(2);
}
for (iter = 1; iter <= niter; iter += 1) {
if (0 == 1) {
timer_start(3);
}
evolve(u0,u1,iter,indexmap,dims[0]);
if (0 == 1) {
timer_stop(3);
}
if (0 == 1) {
timer_start(2);
}
fft(- 1,u1,u2);
if (0 == 1) {
timer_stop(2);
}
if (0 == 1) {
timer_start(4);
}
checksum(iter,u2,dims[0]);
if (0 == 1) {
timer_stop(4);
}
}
verify(256,256,128,niter,&verified,&class);
{
#if defined(_OPENMP)
#endif /* _OPENMP */
/* end parallel */
}
timer_stop(0);
total_time = timer_read(0);
if (total_time != 0.0) {
mflops = 1.0e-6 * ((double )8388608) * (14.8157 + 7.19641 * log((double )8388608) + (5.23518 + 7.21113 * log((double )8388608)) * niter) / total_time;
}
else {
mflops = 0.0;
}
c_print_results("FT",class,256,256,128,niter,nthreads,total_time,mflops," floating point",verified,"3.0 structured","01 Dec 2019","(none)","(none)","-lm","(none)","(none)","(none)","randdp");
if (0 == 1)
print_timers();
return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[128][256][256],dcomplex u1[128][256][256],int t,int indexmap[128][256][256],int d[3])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space
c-------------------------------------------------------------------*/
int i;
int j;
int k;
for (k = 0; k <= d[2] - 1; k += 1) {
for (j = 0; j <= d[1] - 1; j += 1) {
for (i = 0; i <= d[0] - 1; i += 1) {
u1[k][j][i] . real = u0[k][j][i] . real * ex[t * indexmap[k][j][i]];
u1[k][j][i] . imag = u0[k][j][i] . imag * ex[t * indexmap[k][j][i]];
}
}
}
}
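/* In closed form the loop above applies the Fourier-space heat kernel:
   u1(i,j,k) = u0(i,j,k) * exp(ap * t * (ibar^2 + jbar^2 + kbar^2)) with
   ap = -4 * pi^2 * 1.0e-6, since ex[m] = exp(ap)^m is precomputed in
   compute_indexmap and indexmap holds the squared wavenumber. */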
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[128][256][256],int d[3])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
int k;
double x0;
double start;
double an;
double dummy;
static double tmp[131073];
int i;
int j;
int t;
start = 314159265.0;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
ipow46(1220703125.0,(zstart[0] - 1) * 2 * 256 * 256 + (ystart[0] - 1) * 2 * 256,&an);
dummy = randlc(&start,an);
ipow46(1220703125.0,2 * 256 * 256,&an);
/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
for (k = 0; k <= dims[0][2] - 1; k += 1) {
x0 = start;
vranlc(2 * 256 * dims[0][1],&x0,1220703125.0,tmp);
t = 1;
for (j = 0; j <= dims[0][1] - 1; j += 1) {
for (i = 0; i <= 255; i += 1) {
u0[k][j][i] . real = tmp[t++];
u0[k][j][i] . imag = tmp[t++];
}
}
if (k != dims[0][2])
dummy = randlc(&start,an);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void ipow46(double a,int exponent,double *result)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------*/
double dummy;
double q;
double r;
int n;
int n2;
/*--------------------------------------------------------------------
c Use
c a^n = a^(n/2)*a^(n/2) if n even else
c a^n = a*a^(n-1) if n odd
c-------------------------------------------------------------------*/
*result = 1;
if (exponent == 0)
return ;
q = a;
r = 1;
n = exponent;
while(n > 1){
n2 = n / 2;
if (n2 * 2 == n) {
dummy = randlc(&q,q);
n = n2;
}
else {
dummy = randlc(&r,q);
n = n - 1;
}
}
dummy = randlc(&r,q);
*result = r;
}
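/* Worked trace (illustrative), exponent = 5: q = a, r = 1;
   n=5 odd  -> r = r*q = a,   n = 4;
   n=4 even -> q = q*q = a^2, n = 2;
   n=2 even -> q = q*q = a^4, n = 1;
   final r = r*q = a^5, every product reduced mod 2^46 inside randlc(). */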
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void setup()
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr;
int i;
int j;
int fstatus;
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - FT Benchmark\n\n");
niter = 6;
printf(" Size : %3dx%3dx%3d\n",256,256,128);
printf(" Iterations : %7d\n",niter);
/* 1004 format(' Number of processes : ', i7)
1005 format(' Processor array : ', i3, 'x', i3)
1006 format(' WARNING: compiled for ', i5, ' processes. ',
> ' Will not verify. ')*/
#pragma omp parallel for private (i)
for (i = 0; i <= 2; i += 1) {
dims[i][0] = 256;
dims[i][1] = 256;
dims[i][2] = 128;
}
#pragma omp parallel for private (i)
for (i = 0; i <= 2; i += 1) {
xstart[i] = 1;
xend[i] = 256;
ystart[i] = 1;
yend[i] = 256;
zstart[i] = 1;
zend[i] = 128;
}
/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
fftblock = 16;
fftblockpad = 18;
if (fftblock != 16)
fftblockpad = fftblock + 3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[128][256][256],int d[3])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
int i;
int j;
int k;
int ii;
int ii2;
int jj;
int ij2;
int kk;
double ap;
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
#pragma omp parallel for private (ii,ii2,jj,ij2,kk,i,j,k)
for (i = 0; i <= dims[2][0] - 1; i += 1) {
ii = (i + 1 + xstart[2] - 2 + 256 / 2) % 256 - 256 / 2;
ii2 = ii * ii;
#pragma omp parallel for private (jj,ij2,kk,j,k) firstprivate (ii2)
for (j = 0; j <= dims[2][1] - 1; j += 1) {
jj = (j + 1 + ystart[2] - 2 + 256 / 2) % 256 - 256 / 2;
ij2 = jj * jj + ii2;
#pragma omp parallel for private (kk,k) firstprivate (ij2)
for (k = 0; k <= dims[2][2] - 1; k += 1) {
kk = (k + 1 + zstart[2] - 2 + 128 / 2) % 128 - 128 / 2;
indexmap[k][j][i] = kk * kk + ij2;
}
}
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
ap = - 4.0 * 1.0e-6 * 3.141592653589793238 * 3.141592653589793238;
ex[0] = 1.0;
ex[1] = exp(ap);
for (i = 2; i <= 221184; i += 1) {
ex[i] = ex[i - 1] * ex[1];
}
}
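/* Example of the wrap formula for n = 8: i = 1..8 passes through
   (i-1 + n/2) % n - n/2 to give 0, 1, 2, 3, -4, -3, -2, -1, i.e. the
   signed frequency indices whose squares feed the evolution exponent. */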
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void print_timers()
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i;
char *tstrings[] = {" total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy "};
for (i = 0; i <= 6; i += 1) {
if (timer_read(i) != 0.0) {
printf("timer %2d(%16s) :%10.6f\n",i,tstrings[i],timer_read(i));
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft(int dir,dcomplex x1[128][256][256],dcomplex x2[128][256][256])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
dcomplex y0[256][18];
dcomplex y1[256][18];
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
if (dir == 1) {
/* x1 -> x1 */
cffts1(1,dims[0],x1,x1,y0,y1);
/* x1 -> x1 */
cffts2(1,dims[1],x1,x1,y0,y1);
/* x1 -> x2 */
cffts3(1,dims[2],x1,x2,y0,y1);
}
else {
/* x1 -> x1 */
cffts3(- 1,dims[2],x1,x1,y0,y1);
/* x1 -> x1 */
cffts2(- 1,dims[1],x1,x1,y0,y1);
/* x1 -> x2 */
cffts1(- 1,dims[0],x1,x2,y0,y1);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts1(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i;
int j;
int k;
int jj;
for (i = 0; i <= 2; i += 1) {
logd[i] = ilog2(d[i]);
}
{
dcomplex y0[256][18];
dcomplex y1[256][18];
for (k = 0; k <= d[2] - 1; k += 1) {
for (jj = 0; jj <= d[1] - fftblock; jj += fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,j)
for (j = 0; j <= fftblock - 1; j += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= d[0] - 1; i += 1) {
y0[i][j] . real = x[k][j + jj][i] . real;
y0[i][j] . imag = x[k][j + jj][i] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz(is,logd[0],d[0],y0,y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,j)
for (j = 0; j <= fftblock - 1; j += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= d[0] - 1; i += 1) {
xout[k][j + jj][i] . real = y0[i][j] . real;
xout[k][j + jj][i] . imag = y0[i][j] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts2(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i;
int j;
int k;
int ii;
for (i = 0; i <= 2; i += 1) {
logd[i] = ilog2(d[i]);
}
{
dcomplex y0[256][18];
dcomplex y1[256][18];
for (k = 0; k <= d[2] - 1; k += 1) {
for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,j)
for (j = 0; j <= d[1] - 1; j += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= fftblock - 1; i += 1) {
y0[j][i] . real = x[k][j][i + ii] . real;
y0[j][i] . imag = x[k][j][i + ii] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz(is,logd[1],d[1],y0,y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,j)
for (j = 0; j <= d[1] - 1; j += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= fftblock - 1; i += 1) {
xout[k][j][i + ii] . real = y0[j][i] . real;
xout[k][j][i + ii] . imag = y0[j][i] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts3(int is,int d[3],dcomplex x[128][256][256],dcomplex xout[128][256][256],dcomplex y0[256][18],dcomplex y1[256][18])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i;
int j;
int k;
int ii;
for (i = 0; i <= 2; i += 1) {
logd[i] = ilog2(d[i]);
}
{
dcomplex y0[256][18];
dcomplex y1[256][18];
for (j = 0; j <= d[1] - 1; j += 1) {
for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,k)
for (k = 0; k <= d[2] - 1; k += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= fftblock - 1; i += 1) {
y0[k][i] . real = x[k][j][i + ii] . real;
y0[k][i] . imag = x[k][j][i + ii] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz(is,logd[2],d[2],y0,y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma omp parallel for private (i,k)
for (k = 0; k <= d[2] - 1; k += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= fftblock - 1; i += 1) {
xout[k][j][i + ii] . real = y0[k][i] . real;
xout[k][j][i + ii] . imag = y0[k][i] . imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft_init(int n)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the roots-of-unity array that will be used for subsequent FFTs.
c-------------------------------------------------------------------*/
int m;
int nu;
int ku;
int i;
int j;
int ln;
double t;
double ti;
/*--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------*/
nu = n;
m = ilog2(n);
u[0] . real = ((double )m);
u[0] . imag = 0.0;
ku = 1;
ln = 1;
for (j = 1; j <= m; j += 1) {
t = 3.141592653589793238 / ln;
for (i = 0; i <= ln - 1; i += 1) {
ti = i * t;
u[i + ku] . real = cos(ti);
u[i + ku] . imag = sin(ti);
}
ku = ku + ln;
ln = 2 * ln;
}
}
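/* Layout example for n = 8 (m = 3): u[0].real stores m; level j occupies
   u[ku..ku+ln-1] with ku = 1, 2, 4 and ln = 1, 2, 4, holding the roots
   exp(i * k * pi / ln) for k = 0..ln-1, so each FFT stage reads its
   twiddle factors with stride-one access. */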
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cfftz(int is,int m,int n,dcomplex x[256][18],dcomplex y[256][18])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------*/
int i;
int j;
int l;
int mx;
/*--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------*/
mx = ((int )u[0] . real);
if ((is != 1 && is != - 1) || m < 1 || m > mx) {
printf("CFFTZ: Either U has not been initialized, or else\none of the input parameters is invalid%5d%5d%5d\n",is,m,mx);
exit(1);
}
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= m; l += 2) {
fftz2(is,l,m,n,fftblock,fftblockpad,u,x,y);
if (l == m)
break;
fftz2(is,l + 1,m,n,fftblock,fftblockpad,u,y,x);
}
/*--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------*/
if (m % 2 == 1) {
#pragma omp parallel for private (i,j) firstprivate (fftblock,n)
for (j = 0; j <= n - 1; j += 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= fftblock - 1; i += 1) {
x[j][i] . real = y[j][i] . real;
x[j][i] . imag = y[j][i] . imag;
}
}
}
}
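/* Stage pairing example for m = 3: the loop runs fftz2 stages
   l=1 (x -> y), l=2 (y -> x), l=3 (x -> y) and breaks at l == m;
   since m is odd the result sits in y and the tail loop copies it
   back into x, so x always holds the transform on return. */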
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fftz2(int is,int l,int m,int n,int ny,int ny1,dcomplex u[256],dcomplex x[256][18],dcomplex y[256][18])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c-------------------------------------------------------------------*/
int k;
int n1;
int li;
int lj;
int lk;
int ku;
int i;
int j;
int i11;
int i12;
int i21;
int i22;
dcomplex u1;
dcomplex x11;
dcomplex x21;
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
n1 = n / 2;
if (l - 1 == 0) {
lk = 1;
}
else {
lk = 2 << (l - 1 - 1);
}
if (m - l == 0) {
li = 1;
}
else {
li = 2 << (m - l - 1);
}
lj = 2 * lk;
ku = li;
for (i = 0; i <= li - 1; i += 1) {
i11 = i * lk;
i12 = i11 + n1;
i21 = i * lj;
i22 = i21 + lk;
if (is >= 1) {
u1 . real = u[ku + i] . real;
u1 . imag = u[ku + i] . imag;
}
else {
u1 . real = u[ku + i] . real;
u1 . imag = -u[ku + i] . imag;
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k = 0; k <= lk - 1; k += 1) {
#pragma omp parallel for private (j) firstprivate (i11,i12,i21,i22)
for (j = 0; j <= ny - 1; j += 1) {
double x11real;
double x11imag;
double x21real;
double x21imag;
x11real = x[i11 + k][j] . real;
x11imag = x[i11 + k][j] . imag;
x21real = x[i12 + k][j] . real;
x21imag = x[i12 + k][j] . imag;
y[i21 + k][j] . real = x11real + x21real;
y[i21 + k][j] . imag = x11imag + x21imag;
y[i22 + k][j] . real = u1 . real * (x11real - x21real) - u1 . imag * (x11imag - x21imag);
y[i22 + k][j] . imag = u1 . real * (x11imag - x21imag) + u1 . imag * (x11real - x21real);
}
}
}
}
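/* Each (k, j) iteration above is one radix-2 Stockham butterfly:
   y[i21+k] = x11 + x21 and y[i22+k] = u1 * (x11 - x21), where the
   second line expands the complex product (a+bi)(c+di) =
   (ac - bd) + (ad + bc)i with a = u1.real, b = u1.imag. */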
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static int ilog2(int n)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int nn;
int lg;
if (n == 1) {
return 0;
}
lg = 1;
nn = 2;
while(nn < n){
nn = nn << 1;
lg++;
}
return lg;
}
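/* Examples: ilog2(1) = 0, ilog2(8) = 3, ilog2(256) = 8; callers only pass
   powers of two, for which the doubling loop returns the exact exponent. */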
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void checksum(int i,dcomplex u1[128][256][256],int d[3])
{
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int j;
int q;
int r;
int s;
int ierr;
dcomplex chk;
dcomplex allchk;
chk . real = 0.0;
chk . imag = 0.0;
for (j = 1; j <= 1024; j += 1) {
q = j % 256 + 1;
if (q >= xstart[0] && q <= xend[0]) {
r = 3 * j % 256 + 1;
if (r >= ystart[0] && r <= yend[0]) {
s = 5 * j % 128 + 1;
if (s >= zstart[0] && s <= zend[0]) {
          chk . real = chk . real + u1[s - zstart[0]][r - ystart[0]][q - xstart[0]] . real;
          chk . imag = chk . imag + u1[s - zstart[0]][r - ystart[0]][q - xstart[0]] . imag;
}
}
}
}
{
sums[i] . real += chk . real;
sums[i] . imag += chk . imag;
}
{
/* complex % real */
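    /* normalize by NTOTAL = 256*256*128 = 8388608 grid points for this class */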
sums[i] . real = sums[i] . real / ((double )8388608);
sums[i] . imag = sums[i] . imag / ((double )8388608);
printf("T = %5d Checksum = %22.12e %22.12e\n",i,sums[i] . real,sums[i] . imag);
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify(int d1,int d2,int d3,int nt,boolean *verified,char *class)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr;
int size;
int i;
double err;
double epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[7] = {(0.0), (5.546087004964e+02), (5.546385409189e+02), (5.546148406171e+02), (5.545423607415e+02), (5.544255039624e+02), (5.542683411902e+02)};
double vdata_imag_s[7] = {(0.0), (4.845363331978e+02), (4.865304269511e+02), (4.883910722336e+02), (4.901273169046e+02), (4.917475857993e+02), (4.932597244941e+02)};
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[7] = {(0.0), (5.673612178944e+02), (5.631436885271e+02), (5.594024089970e+02), (5.560698047020e+02), (5.530898991250e+02), (5.504159734538e+02)};
double vdata_imag_w[7] = {(0.0), (5.293246849175e+02), (5.282149986629e+02), (5.270996558037e+02), (5.260027904925e+02), (5.249400845633e+02), (5.239212247086e+02)};
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[7] = {(0.0), (5.046735008193e+02), (5.059412319734e+02), (5.069376896287e+02), (5.077892868474e+02), (5.085233095391e+02), (5.091487099959e+02)};
double vdata_imag_a[7] = {(0.0), (5.114047905510e+02), (5.098809666433e+02), (5.098144042213e+02), (5.101336130759e+02), (5.104914655194e+02), (5.107917842803e+02)};
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[21] = {(0.0), (5.177643571579e+02), (5.154521291263e+02), (5.146409228649e+02), (5.142378756213e+02), (5.139626667737e+02), (5.137423460082e+02), (5.135547056878e+02), (5.133910925466e+02), (5.132470705390e+02), (5.131197729984e+02), (5.130070319283e+02), (5.129070537032e+02), (5.128182883502e+02), (5.127393733383e+02), (5.126691062020e+02), (5.126064276004e+02), (5.125504076570e+02), (5.125002331720e+02), (5.124551951846e+02), (5.124146770029e+02)};
double vdata_imag_b[21] = {(0.0), (5.077803458597e+02), (5.088249431599e+02), (5.096208912659e+02), (5.101023387619e+02), (5.103976610617e+02), (5.105948019802e+02), (5.107404165783e+02), (5.108576573661e+02), (5.109577278523e+02), (5.110460304483e+02), (5.111252433800e+02), (5.111968077718e+02), (5.112616233064e+02), (5.113203605551e+02), (5.113735928093e+02), (5.114218460548e+02), (5.114656139760e+02), (5.115053595966e+02), (5.115415130407e+02), (5.115744692211e+02)};
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[21] = {(0.0), (5.195078707457e+02), (5.155422171134e+02), (5.144678022222e+02), (5.140150594328e+02), (5.137550426810e+02), (5.135811056728e+02), (5.134569343165e+02), (5.133651975661e+02), (5.132955192805e+02), (5.132410471738e+02), (5.131971141679e+02), (5.131605205716e+02), (5.131290734194e+02), (5.131012720314e+02), (5.130760908195e+02), (5.130528295923e+02), (5.130310107773e+02), (5.130103090133e+02), (5.129905029333e+02), (5.129714421109e+02)};
double vdata_imag_c[21] = {(0.0), (5.149019699238e+02), (5.127578201997e+02), (5.122251847514e+02), (5.121090289018e+02), (5.121143685824e+02), (5.121496764568e+02), (5.121870921893e+02), (5.122193250322e+02), (5.122454735794e+02), (5.122663649603e+02), (5.122830879827e+02), (5.122965869718e+02), (5.123075927445e+02), (5.123166486553e+02), (5.123241541685e+02), (5.123304037599e+02), (5.123356167976e+02), (5.123399592211e+02), (5.123435588985e+02), (5.123465164008e+02)};
epsilon = 1.0e-12;
*verified = 1;
*class = 'U';
if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) {
*class = 'S';
for (i = 1; i <= nt; i += 1) {
err = (sums[i] . real - vdata_real_s[i]) / vdata_real_s[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
err = (sums[i] . imag - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
}
}
else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) {
*class = 'W';
for (i = 1; i <= nt; i += 1) {
err = (sums[i] . real - vdata_real_w[i]) / vdata_real_w[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
err = (sums[i] . imag - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
}
}
else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) {
*class = 'A';
for (i = 1; i <= nt; i += 1) {
err = (sums[i] . real - vdata_real_a[i]) / vdata_real_a[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
err = (sums[i] . imag - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
}
}
else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) {
*class = 'B';
for (i = 1; i <= nt; i += 1) {
err = (sums[i] . real - vdata_real_b[i]) / vdata_real_b[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
err = (sums[i] . imag - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
}
}
else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) {
*class = 'C';
for (i = 1; i <= nt; i += 1) {
err = (sums[i] . real - vdata_real_c[i]) / vdata_real_c[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
err = (sums[i] . imag - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs(err) > epsilon) {
*verified = 0;
break;
}
}
}
if (( *class) != 'U') {
printf("Result verification successful\n");
}
else {
printf("Result verification failed\n");
}
printf("class = %1c\n",( *class));
}
|
DRB031-truedepfirstdimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
There is a true dependence carried by the outer loop of the second loop nest.
Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15
*/
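/*
A sketch (not part of the benchmark, shown for illustration only) of one
race-free alternative for the second loop nest, using OpenMP 4.5 doacross
ordering to honor the i -> i-1 dependence. It is correct but effectively
serializes the outer loop; loop skewing would be needed to expose real
parallelism for this diagonal dependence:

  #pragma omp parallel for ordered(1) private(j)
  for (i=1;i<n;i++) {
    #pragma omp ordered depend(sink: i-1)
    for (j=1;j<m;j++)
      b[i][j]=b[i-1][j-1];
    #pragma omp ordered depend(source)
  }
*/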
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i,j;
int n=1000, m=1000;
double b[1000][1000];
#pragma omp parallel for private(j)
for (i=0; i<n; i++)
#pragma omp parallel for simd
for (j=0; j<m; j++)
b[i][j] = 0.5;
for (i=1;i<n;i++)
#pragma omp parallel for simd
for (j=1;j<m;j++)
b[i][j]=b[i-1][j-1];
#pragma omp parallel for private(j) ordered
for (i=0;i<n;i++)
#pragma omp parallel for simd ordered
for (j=0;j<m;j++)
#pragma omp ordered simd
printf("b[%d][%d]=%f\n", i, j, b[i][j]);
return 0;
}
|
GB_unop__sqrt_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fp64_fp64)
// op(A') function: GB (_unop_tran__sqrt_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sqrt (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sqrt (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sqrt (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sqrt_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = sqrt (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = sqrt (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sqrt_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
TomoP2DModel_core.c | /*
* Copyright 2017 Daniil Kazantsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <stdio.h>
#include "omp.h"
#include "utils.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define MAXCHAR 1000
/* Functions to build spatial (2D) and temporal (2D +time) phantoms from the library of models: Phantom2DLibrary.dat
*
* Input Parameters:
* 1. ModelNo - the model number from Phantom3DLibrary file
* 2. VolumeSize in voxels (N x N)
* 3. Object - Analytical Model
* 4. C0 - intensity
* 5. x0 - x0 position
* 6. y0 - y0 position
* 7. a - size object
* 8. b - size object
* 9. phi_rot - rotation angle
*
* Output:
* 1. The analytical phantom size of [N x N] or a temporal phantom size of [N xN x Time-frames]
*/
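/* Hypothetical driver sketch (for illustration; values are assumptions):
 *   float *A = (float*)calloc((size_t)N*N, sizeof(float));
 *   TomoP2DObject_core(A, N, "gaussian", 1.0f, 0.0f, 0.0f, 0.25f, 0.25f, 0.0f, 0);
 * adds a single centered gaussian of intensity 1 into the N x N array A. */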
/* function to build a single object */
float TomoP2DObject_core(float *A, int N, char *Object,
float C0, /* intensity */
float x0, /* x0 position */
float y0, /* y0 position */
float a , /* a - size object */
float b , /* b - size object */
float phi_rot, /* phi - rotation angle */
int tt /* time frame loop */)
{
printf ("Base C0 %.2e x0 %.2e y0 %.2e a %.2e b %.2e phi %.2e\n" , C0, x0, y0, a, b, phi_rot);
int i, j;
float *Tomorange_X_Ar=NULL, Tomorange_Xmin, Tomorange_Xmax, H_x, C1, a2, b2, phi_rot_radian, sin_phi, cos_phi;
float *Xdel = NULL, *Ydel = NULL, T;
Tomorange_X_Ar = malloc(N*sizeof(float));
Tomorange_Xmin = -1.0f;
Tomorange_Xmax = 1.0f;
H_x = (Tomorange_Xmax - Tomorange_Xmin)/(N);
for(i=0; i<N; i++) {Tomorange_X_Ar[i] = Tomorange_Xmin + (float)i*H_x;}
C1 = -4.0f*logf(2.0f);
/************************************************/
phi_rot_radian = phi_rot*((float)M_PI/180.0f);
sin_phi=sinf(phi_rot_radian); cos_phi=cosf(phi_rot_radian);
Xdel = malloc(N*sizeof(float));
Ydel = malloc(N*sizeof(float));
for(i=0; i<N; i++) {
Xdel[i] = Tomorange_X_Ar[i] - x0;
Ydel[i] = Tomorange_X_Ar[i] - y0;
}
a2 = 1.0f/(a*a);
b2 = 1.0f/(b*b);
/* all parameters of an object have been extracted, now run the building modules */
if (strcmp("gaussian",Object) == 0) {
/* The object is a gaussian */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = C1*(a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2));
A[tt*N*N + j*N+i] += C0*expf(T);
}}
}
else if (strcmp("parabola",Object) == 0) {
/* the object is a parabola Lambda = 1/2 */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*sqrtf(1.0f - T);
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("ellipse",Object) == 0) {
/* the object is an elliptical disk */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0;
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("parabola1",Object) == 0) {
/* the object is a parabola Lambda = 1*/
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = (4.0f*a2)*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + (4.0f*b2)*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*sqrtf(1.0f - T);
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("cone",Object) == 0) {
/*the object is a cone*/
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*(1.0f - sqrtf(T));
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("rectangle",Object) == 0) {
/* the object is a rectangle */
float x0r, y0r, HX, HY;
a2 = 0.5f*a;
b2 = 0.5f*b;
x0r=x0*cosf(0.0f) + y0*sinf(0.0f);
y0r=-x0*sinf(0.0f) + y0*cosf(0.0f);
if (phi_rot_radian < 0.0f) {
phi_rot_radian = (float)M_PI + phi_rot_radian;
sin_phi=sinf(phi_rot_radian);
cos_phi=cosf(phi_rot_radian);
}
#pragma omp parallel for shared(A) private(i,j,HX,HY,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
HX = fabsf((Xdel[i] - x0r)*sin_phi + (Ydel[j] - y0r)*cos_phi);
T = 0.0f;
if (HX <= a2) {
HY = fabsf((Ydel[j] - y0r)*sin_phi - (Xdel[i] - x0r)*cos_phi);
if (HY <= b2) {T = C0;}
}
A[tt*N*N + j*N+i] += T;
}}
}
    else {
        /* unknown object name: release buffers before bailing out */
        free(Xdel); free(Ydel);
        free(Tomorange_X_Ar);
        return 0;
    }
free(Xdel); free(Ydel);
/************************************************/
free(Tomorange_X_Ar);
return *A;
}
float TomoP2DModel_core(float *A, int ModelSelected, int N, char *ModelParametersFilename)
{
FILE *fp = fopen(ModelParametersFilename, "r"); // read parameters file
int Model=0, Components=0, steps = 0, counter=0, ii;
float C0 = 0.0f, x0 = 0.0f, y0 = 0.0f, a = 0.0f, b = 0.0f, psi_gr1 = 0.0f;
if( fp == NULL ) {
printf("%s \n","Cannot open the model library file (Phantom2DLibrary.dat)");
}
else {
char str[MAXCHAR];
char tmpstr1[16];
char tmpstr2[22];
char tmpstr3[16];
char tmpstr4[16];
char tmpstr5[16];
char tmpstr6[16];
char tmpstr7[16];
char tmpstr8[16];
while (fgets(str, MAXCHAR, fp) != NULL)
{
/* work with non-# commented lines */
if(str[0] != '#') {
sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
if (strcmp(tmpstr1,"Model")==0)
{
Model = atoi(tmpstr2);
if ((ModelSelected == Model) && (counter == 0)) {
/* check if we have a right model */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
//mexErrMsgTxt("Unexpected the end of the line (Components) in parameters file");
break; }
if (strcmp(tmpstr1,"Components") == 0) Components = atoi(tmpstr2);
//printf("%s %i\n", "Components:", Components);
if (Components <= 0) {
// printf("%s %i\n", "Components cannot be negative, the given value is", Components);
// mexErrMsgTxt("Components cannot be negative");
break; }
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
//mexErrMsgTxt("Unexpected the end of the line (TimeSteps) in parameters file");
break; }
if (strcmp(tmpstr1,"TimeSteps") == 0) steps = atoi(tmpstr2);
if (steps <= 0) {
// printf("%s %i\n", "TimeSteps cannot be negative, the given value is", steps);
//mexErrMsgTxt("TimeSteps cannot be negative");
break; }
//printf("%s %i\n", "TimeSteps:", steps);
if (steps == 1) {
/**************************************************/
printf("\n %s %i %s \n", "Stationary 2D model", ModelSelected, " is selected");
/* loop over all components */
for(ii=0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
else {
//mexErrMsgTxt("Unexpected the end of the line (objects loop) in parameters file");
break; }
if (strcmp(tmpstr1,"Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
a = (float)atof(tmpstr6); /* a - size object */
b = (float)atof(tmpstr7); /* b - size object */
psi_gr1 = (float)atof(tmpstr8); /* rotation angle 1*/
}
else {
//mexErrMsgTxt("Cannot find 'Object' string in parameters file");
break; }
printf ("C0 %.2e x0 %.2e y0 %.2e a %.2e b %.2e phi %.2e\n" , C0, x0, y0, a, b, psi_gr1);
TomoP2DObject_core(A, N, tmpstr2, C0, y0, x0, a, b, psi_gr1, 0); /* python */
}
}
else {
/**************************************************/
printf("\n %s %i %s \n", "Temporal 2D+time model", ModelSelected, " is selected");
/* temporal phantom 2D + time (3D) */
float C1 = 0.0f, x1 = 0.0f, y1 = 0.0f, a1 = 0.0f, b1 = 0.0f, psi_gr1_1 = 0.0f;
/* loop over all components */
for(ii=0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
else {
// mexErrMsgTxt("Unexpected the end of the line (objects loop) in parameters file");
break; }
if (strcmp(tmpstr1,"Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
a = (float)atof(tmpstr6); /* a - size object */
b = (float)atof(tmpstr7); /* b - size object */
psi_gr1 = (float)atof(tmpstr8); /* rotation angle 1*/
}
else {
// mexErrMsgTxt("Cannot find 'Object' string in parameters file");
break; }
/* check Endvar related parameters */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
else {
// mexErrMsgTxt("Unexpected the end of the line (Endvar loop) in parameters file");
break; }
if (strcmp(tmpstr1,"Endvar") == 0) {
C1 = (float)atof(tmpstr3); /* intensity */
x1 = (float)atof(tmpstr4); /* x0 position */
y1 = (float)atof(tmpstr5); /* y0 position */
a1 = (float)atof(tmpstr6); /* a - size object */
b1 = (float)atof(tmpstr7); /* b - size object */
psi_gr1_1 = (float)atof(tmpstr8); /* rotation angle 1*/
}
else {
printf("%s\n", "Cannot find 'Endvar' string in parameters file");
break; }
/* now we know the initial and final parameters of the object; linearly interpolate between them to establish per-step coordinates */
/* calculating the full distance between the start and the end points */
float distance = sqrtf(powf((x1 - x0),2.0f) + powf((y1 - y0),2.0f));
float d_dist = distance/(steps-1); /*a step over line */
float C_step = (C1 - C0)/(steps-1);
float a_step = (a1 - a)/(steps-1);
float b_step = (b1 - b)/(steps-1);
float phi_rot_step = (psi_gr1_1 - psi_gr1)/(steps-1);
int tt;
float x_t, y_t, a_t, b_t, C_t, phi_t, d_step;
/* initialize */
x_t = x0; y_t = y0; a_t = a; b_t = b; C_t = C0; phi_t = psi_gr1; d_step = d_dist;
/*loop over time frames*/
for(tt=0; tt < steps; tt++) {
TomoP2DObject_core(A, N, tmpstr2, C_t, x_t, -y_t, a_t, b_t, phi_t, tt); /* python */
/* calculating new coordinates of an object */
if (distance != 0.0f) {
float t = d_step/distance;
x_t = (1-t)*x0 + t*x1;
y_t = (1-t)*y0 + t*y1; }
else {
x_t = x0;
y_t = y0; }
d_step += d_dist;
a_t += a_step;
b_t += b_step;
C_t += C_step;
phi_t += phi_rot_step;
} /*time steps*/
} /*components loop*/
}
counter++;
}
}
}
}
}
fclose(fp);
return *A;
}
|
lud_omp.c | #include <stdio.h>
#include <omp.h>
extern int omp_num_threads;
void lud_omp(float *a, int size)
{
int i,j,k;
float sum;
printf("num of threads = %d\n", omp_num_threads);
for (i=0; i <size; i++){
omp_set_num_threads(omp_num_threads);
#pragma omp parallel for default(none) \
private(j,k,sum) shared(size,i,a)
for (j=i; j <size; j++){
sum=a[i*size+j];
for (k=0; k<i; k++) sum -= a[i*size+k]*a[k*size+j];
a[i*size+j]=sum;
}
#pragma omp parallel for default(none) \
private(j,k,sum) shared(size,i,a)
for (j=i+1;j<size; j++){
sum=a[j*size+i];
for (k=0; k<i; k++) sum -=a[j*size+k]*a[k*size+i];
a[j*size+i]=sum/a[i*size+i];
}
}
}
|
GB_unop__acos_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fp64_fp64)
// op(A') function: GB (_unop_tran__acos_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acos (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acos (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = acos (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__acos_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__acos_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB065-pireduction-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic PI calculation using reduction
*/
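/*
The loop below applies the midpoint rule to pi = 4 * integral_0^1 dx/(1+x^2):
each iteration samples 1/(x*x+1) at the midpoint of its sub-interval, and
reduction(+:pi) gives every thread a private accumulator that is combined
at the end, so the concurrent updates are race-free.
*/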
#define num_steps 2000000000
#include <stdio.h>
#include <omp.h>
int main(int argc,char **argv)
{
double pi = 0.0;
long i;
double x;
double interval_width;
  interval_width = 1.0 / ((double )num_steps);
#pragma omp parallel for private (x,i) reduction (+:pi)
  for (i = 0; i <= ((long )num_steps) - 1; i += 1) {
x = (i + 0.5) * interval_width;
pi += 1.0 / (x * x + 1.0);
}
pi = pi * 4.0 * interval_width;
printf("PI=%f\n",pi);
return 0;
}
|
requantize_leakyrelu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void requantize_leakyrelu_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
int outc = top_blob.c;
int out_elempack = top_blob.elempack;
int scale_in_data_size = scale_in_data.w;
int scale_out_data_size = scale_out_data.w;
int bias_data_size = bias_data.w;
// int8(leakyrelu(v * scale_in, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out), slope)
// int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
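    // Scalar sketch of the fused path above (an assumption, per element; bias
    // term dropped when bias_data is empty):
    //   float v = *intptr * (scale_in * scale_out) + bias * scale_out;
    //   *ptr = float2int8(v >= 0.f ? v : v * slope);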
if (out_elempack == 8)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
int i = 0;
for (; i + 3 < size; i += 4)
{
__builtin_prefetch(intptr0 + 64);
__builtin_prefetch(intptr1 + 64);
v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
_v00 = __msa_fmul_w(_v00, _scale0);
_v01 = __msa_fmul_w(_v01, _scale0);
_v02 = __msa_fmul_w(_v02, _scale0);
_v03 = __msa_fmul_w(_v03, _scale0);
_v10 = __msa_fmul_w(_v10, _scale1);
_v11 = __msa_fmul_w(_v11, _scale1);
_v12 = __msa_fmul_w(_v12, _scale1);
_v13 = __msa_fmul_w(_v13, _scale1);
*((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
*((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
*((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v02, _v12, _slope);
*((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v03, _v13, _slope);
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
for (; i < size; i++)
{
__builtin_prefetch(intptr0 + 16);
__builtin_prefetch(intptr1 + 16);
v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
_v0 = __msa_fmul_w(_v0, _scale0);
_v1 = __msa_fmul_w(_v1, _scale1);
*((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
intptr0 += 4;
intptr1 += 4;
ptr += 8;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
_bias0 = __msa_fmul_w(_bias0, _scale_out0);
_bias1 = __msa_fmul_w(_bias1, _scale_out1);
v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
int i = 0;
for (; i + 3 < size; i += 4)
{
__builtin_prefetch(intptr0 + 64);
__builtin_prefetch(intptr1 + 64);
v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
_v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
_v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
_v02 = __msa_fmadd_w(_bias0, _v02, _scale0);
_v03 = __msa_fmadd_w(_bias0, _v03, _scale0);
_v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
_v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
_v12 = __msa_fmadd_w(_bias1, _v12, _scale1);
_v13 = __msa_fmadd_w(_bias1, _v13, _scale1);
*((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
*((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
*((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v02, _v12, _slope);
*((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v03, _v13, _slope);
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
for (; i + 1 < size; i += 2)
{
__builtin_prefetch(intptr0 + 32);
__builtin_prefetch(intptr1 + 32);
v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
_v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
_v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
_v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
_v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
*((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
*((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
intptr0 += 8;
intptr1 += 8;
ptr += 16;
}
for (; i < size; i++)
{
__builtin_prefetch(intptr0 + 16);
__builtin_prefetch(intptr1 + 16);
v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
_v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
_v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
*((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
intptr0 += 4;
intptr1 += 4;
ptr += 8;
}
}
}
}
if (out_elempack == 1)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
int i = 0;
for (; i < size; i++)
{
__builtin_prefetch(intptr + 16);
v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
_v = __msa_fmul_w(_v, _scale);
v16i8 v = float2int8leakyrelu(_v, _slope);
ptr0[0] = v[0];
ptr1[0] = v[1];
ptr2[0] = v[2];
ptr3[0] = v[3];
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 4, 0);
v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
_bias = __msa_fmul_w(_bias, _scale_out);
v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
int i = 0;
for (; i < size; i++)
{
__builtin_prefetch(intptr + 16);
v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
_v = __msa_fmadd_w(_bias, _v, _scale);
v16i8 v = float2int8leakyrelu(_v, _slope);
ptr0[0] = v[0];
ptr1[0] = v[1];
ptr2[0] = v[2];
ptr3[0] = v[3];
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
}
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
  /// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
  /// Gets set to true after calling ProduceSignatureHelp. It is a workaround
  /// that makes sure ProduceSignatureHelp is only called at the deepest
  /// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
      /// No space before the '<' token.
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
    /// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or a token commonly mistyped
/// for '='. For typos, give a fixit to '='.
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
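// Save the current token, push 'Consumed' back into the preprocessor's
// token stream and re-lex it so it becomes the current token again, then
// re-inject the saved token so it is returned by the next Lex.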
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
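/// For illustration (made-up snippet), the adjacent tokens in
///   "Hello, " "world"
/// are lexed as two string-literal tokens that later concatenate into a
/// single literal.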
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// When we are consuming a code-completion token without having matched a
/// specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
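// Note (illustrative): NextToken() behaves like GetLookAheadToken(1),
// except that GetLookAheadToken returns 'Tok' itself once end-of-file is
// reached; GetLookAheadToken(0) is always the already-lexed 'Tok'.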
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
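// Illustrative sketch of a disambiguation probe that always rewinds:
//   {
//     RevertingTentativeParsingAction TPA(*this);
//     ConsumeToken();
//     // ... look at tokens to decide between parses ...
//   } // all consumed tokens are returned to the stream here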
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
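// Illustrative sketch (Scope::DeclScope chosen only as an example flag):
//   {
//     ParseScope BodyScope(this, Scope::DeclScope);
//     // ... declarations parsed here land in the new scope ...
//   } // BodyScope's destructor exits the scope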
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
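// Illustrative recovery sketch: skip ahead to the next ')' without eating
// a ';' and without consuming the ')' itself:
//   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);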
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
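///
/// For illustration (made-up names 'S' and 'g'): in
///   struct S { void f(int x = g()); static int g(); };
/// the default argument 'g()' names a member declared later, so its tokens
/// are saved and parsed only once the class is complete.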
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top-level
/// (non-nested) C++ class, some method declarations contain parts that
/// won't be parsed until after the definition is completed
/// (C++ [class.mem]p2); those method declarations, and any attached inline
/// definitions, are stored here along with the tokens that will later be
/// parsed to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains any template-specific information that has been parsed
/// prior to parsing declaration specifiers.
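///
/// For illustration (made-up declarations):
///   template<typename T> void f(T);    // Kind == Template
///   template<> void f<int>(int);       // Kind == ExplicitSpecialization
///   template void f<int>(int);         // Kind == ExplicitInstantiation
///   extern template void f<int>(int);  // ExplicitInstantiation, ExternLoc set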
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
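///
/// For illustration (made-up snippets):
///   (1 + 2)             // SimpleExpr
///   (args + ...)        // FoldExpr
///   ({ int x = 1; x })  // CompoundStmt (GNU statement expression)
///   (int[]){1, 2}       // CompoundLiteral
///   (int)x              // CastExpr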
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
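// For illustration (made-up snippets, Objective-C++): a leading '[' is
// ambiguous: '[&x] { return x; }' begins a lambda-introducer, while
// '[obj doSomething]' is a message send; the tentative parse decides which.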
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior to take for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block
/// should have.
IfExistsBehavior Behavior;
};
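// For illustration (Microsoft extension; 'C' and 'member' are made-up):
//   __if_exists(C::member) { /* parsed only if C::member exists */ }
//   __if_not_exists(C::member) { /* parsed only if it does not */ }
// Inside a template the answer may depend on the instantiation, which is
// what IEB_Dependent captures.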
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
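// A classic case this must resolve: "T(a);" can be a declaration of a
// variable 'a' of type T or a function-style cast expression; C++ breaks
// such ties in favor of the declaration ([stmt.ambig]).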
/// isForInitDeclaration - Disambiguates between a declaration and an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
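// For example, "(int)x" carries a type-id in the parens, while "(x) + y"
// carries an expression; forms like "(T())" can be read either way, which
// the isAmbiguous out-parameter is meant to flag.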
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration and an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration and an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
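// Illustrative forms for each result (C++17/C++20 where noted):
//   if (x > 0)              -> Expression
//   if (int x = f())        -> ConditionDecl
//   if (int x = f(); x > 0) -> InitStmtDecl (C++17 init-statement)
//   for (auto &e : v)       -> ForRangeDecl (range-based for, via the
//                              for-statement path with CanBeForRangeDecl)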
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses OpenMP context selectors and calls \p Callback for each
/// successfully parsed context selector.
bool
parseOpenMPContextSelectors(SourceLocation Loc,
llvm::function_ref<void(SourceRange)> Callback);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
GB_unaryop__identity_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint16
// op(A') function: GB_tran__identity_fp64_uint16
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
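// Given the definitions above, GB_CAST_OP(pC,pA) therefore expands, per
// entry, to:
//      uint16_t aij = Ax [pA] ;
//      double x = (double) aij ;
//      Cx [pC] = x ;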
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_uint16
(
double *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
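// Minimal usage sketch (hypothetical caller, single thread):
//      const uint16_t Ax [3] = {1, 2, 3} ;
//      double Cx [3] ;
//      GrB_Info info = GB_unop__identity_fp64_uint16 (Cx, Ax, 3, 1) ;
//      // on success, info == GrB_SUCCESS and Cx == {1.0, 2.0, 3.0}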
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Output value of the left child
* \param right_value Output value of the right child
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Output value of the left child
* \param right_value Output value of the right child
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = output;
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Total number of data points
* \param score Score array to which this tree's predictions are added
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Total number of data points
* \param score Score array to which this tree's predictions are added
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
inline double split_threshold(int split_idx) const { return threshold_[split_idx]; }
inline int split_left_child(int split_idx) const { return left_child_[split_idx]; }
inline int split_right_child(int split_idx) const { return right_child_[split_idx]; }
inline int8_t split_decision_type(int split_idx) const { return decision_type_[split_idx]; }
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
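// Convention used throughout this class: node >= 0 indexes an internal node,
// while a negative value encodes a leaf whose index is the bitwise complement
// ~node (see Split() and GetLeaf() below).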
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a. learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] *= rate;
}
shrinkage_ *= rate;
}
inline double shrinkage() const {
return shrinkage_;
}
inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] = val + leaf_value_[i];
}
// force to 1.0
shrinkage_ = 1.0f;
}
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
inline static bool IsZero(double fval) {
return fval > -kZeroThreshold && fval <= kZeroThreshold;
}
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
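// Bit layout of decision_type_, as implied by the helpers above:
//   bit 0 (kCategoricalMask): this is a categorical split
//   bit 1 (kDefaultLeftMask): missing values default to the left child
//   bits 2-3: missing type (0 = none, 1 = zero is missing, 2 = NaN is missing)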
void RecomputeMaxDepth();
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval)) {
if (missing_type != 2) {
fval = 0.0f;
}
}
if ((missing_type == 1 && IsZero(fval))
|| (missing_type == 2 && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == 1 && fval == default_bin)
|| (missing_type == 2 && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int CategoricalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
int int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
} else if (std::isnan(fval)) {
// NaN is always in the right
if (missing_type == 2) {
return right_child_[node];
}
int_fval = 0;
}
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
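// Note: for categorical splits, threshold_[node] stores an index into
// cat_boundaries_; consecutive boundary entries delimit this node's category
// bitset within cat_threshold_ (the *_inner_ members mirror this in bin space).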
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used to fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes;
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Stores the information for categorical feature handling and missing value handling. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
double shrinkage_;
int max_depth_;
};
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = Common::AvoidInf(gain);
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
}
inline double Tree::Predict(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return leaf;
} else {
return 0;
}
}
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return leaf;
} else {
return 0;
}
}
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK(max_depth_ >= 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
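// The buffer above holds 1 + 2 + ... + max_path_len PathElements: TreeSHAP
// keeps one path prefix per depth of the current root-to-leaf walk, so the
// triangular number bounds the unique-path storage required.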
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());
if (node < 0) {
leaf_depth_[~node] = depth;
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values[split_feature_[node]], node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
return ~node;
}
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
kCDensestMem.c | /*
Info:
This program corresponds to the exact algorithm in the PVLDB 2020 paper.
Feel free to use these lines as you wish.
This program enumerates all k-cliques, stores them in main memory, and applies
the "++" operator repeatedly to find the k-clique densest subgraph, until the
suspected k-clique densest subgraph passes the optimality test based on either
the improved Goldberg's condition or a max-flow.
This program can handle both the case k = 2 (where all edges are treated as
the k-cliques) and the case k >= 3 (where the subroutine to list all k-cliques,
kClist, is executed once). Note again that all k-cliques are stored in main
memory, consuming super-linear space. One advantage, however, is that we can
shuffle all the cliques to prevent the cliques containing the same node from
coming in batches.
To compile:
"g++ kCDensestMem.c BinaryHeap.c Graph.c MaxFlow.cpp -O3 -o kCDensestMem -lm -fopenmp"
To execute:
"./kCDensestMem p k edgeListFileName tag"
p is the number of threads.
k is the size of a clique considered as in "k-clique".
edgeListFileName is the name of the file that contains the graph. Each line of
the file contains one edge represented by two integers separated by a space.
tag is a string specifying the dataset (e.g., "dblp"), which is used to
generate the output file name.
Output:
A series of suspected k-clique densest subgraphs. One record per line,
containing
- number of iterations of sequential updates run so far (always a power of 2);
- the number of nodes in the suspected k-clique densest subset;
- the k-clique density of the suspected k-clique densest subset;
- the time elapsed since the beginning of the execution.
When the exact solution is eventually found, the program additionally prints
- the number of edges in the k-clique densest subgraph;
- the optimality test that is passed ("Goldberg" or "Max Flow");
- the number of max-flow calls.
*/
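/* Example invocation (the edge-list file name is hypothetical):
       ./kCDensestMem 8 3 com-dblp.txt dblp
   runs with 8 threads and k = 3, so triangles serve as the k-cliques. */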
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <limits.h>
#include "Graph.h"
#include "MaxFlow.hpp"
unsigned MAX_CLIQUES = 100000000; // Maximum number of cliques for memory allocation; will increase if needed
static int UnsignedCmp(const void *a, const void *b) {
unsigned ua = *(const unsigned *)a, ub = *(const unsigned *)b;
// Compare without subtraction: truncating the long long difference to int
// can flip the sign when the values differ by more than INT_MAX.
return (ua > ub) - (ua < ub);
}
Subgraph *AllocSubgraph(Graph *g, unsigned char k) {
Subgraph *sg = (Subgraph *)malloc(sizeof(Subgraph));
sg->n = (unsigned *)calloc(k, sizeof(unsigned));
sg->d = (unsigned **)malloc(k * sizeof(unsigned *));
sg->adj = (unsigned *)malloc(g->core * g->core * sizeof(unsigned));
sg->label = (unsigned char *)calloc(g->core, sizeof(unsigned char));
sg->nodes = (unsigned **)malloc(k * sizeof(unsigned *));
sg->core = g->core;
for (unsigned i = 1; i < k; ++i){
sg->d[i] = (unsigned *)malloc(g->core * sizeof(unsigned));
sg->nodes[i] = (unsigned *)malloc(g->core * sizeof(unsigned));
}
return sg;
}
static unsigned *id_sg2g = NULL, *id_g2sg = NULL; // to improve (???)
#pragma omp threadprivate(id_g2sg, id_sg2g)
void MakeSubgraph(Graph *g, unsigned u, unsigned v, Subgraph *sg, unsigned char k) {
if (id_sg2g == NULL){
id_g2sg = (unsigned *)malloc(g->n * sizeof(unsigned));
id_sg2g = (unsigned *)malloc(g->core * sizeof(unsigned));
for (unsigned i = 0; i < g->n; ++i) {
id_g2sg[i] = UINT_MAX;
}
}
for (unsigned i = 0; i < sg->n[k - 1]; ++i) {
sg->label[i] = 0;
}
for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) { // For each out-neighbor of v
id_g2sg[g->adj[i]] = UINT_MAX - 1;
}
unsigned j = 0;
for (unsigned i = g->cd[u]; i < g->cd[u + 1]; ++i) { // For each out-neighbor of u
unsigned x = g->adj[i];
if (id_g2sg[x] == UINT_MAX - 1) {
id_g2sg[x] = j;
id_sg2g[j] = x;
sg->label[j] = k - 2;
sg->nodes[k - 2][j] = j;
sg->d[k - 2][j] = 0; // New degrees
++j;
}
}
sg->n[k - 2] = j;
for (unsigned i = 0; i < sg->n[k - 2]; ++i) { // Reorder adjacency list and compute new degrees
unsigned x = id_sg2g[i];
for (unsigned l = g->cd[x]; l < g->cd[x + 1]; ++l) {
unsigned y = g->adj[l];
j = id_g2sg[y];
if (j < UINT_MAX - 1) {
sg->adj[sg->core * i + sg->d[k - 2][i]++] = j;
}
}
}
for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) {
id_g2sg[g->adj[i]] = UINT_MAX; // Reset the markers set above
}
}
// Clique-density-friendly decomposition
unsigned *cknodes; // Nodes of a clique
#pragma omp threadprivate(cknodes)
double *rho;
double *alpha;
double *rho_tentative;
unsigned *level;
unsigned *reordered;
unsigned *ck; // List of all cliques
unsigned *p_ckend; // Pointer to the end of ck[]
unsigned long long cnt_clique;
typedef enum {FRANK_WOLFE = 2,
PAVA_PREPROCESS = 3} task_t;
void AllocCdf(Graph *g, unsigned k) {
rho = (double *)malloc(g->n * sizeof(double));
rho_tentative = (double *)malloc(g->n * sizeof(double));
level = (unsigned *)malloc(g->n * sizeof(unsigned));
reordered = (unsigned *)malloc(g->n * sizeof(unsigned));
}
inline int CDF_RerunFrankWolfeCmp(const unsigned u, const unsigned v) {
if (level[u] < level[v]) return -1;
if (level[u] > level[v]) return 1;
if (rho[u] > rho[v]) return -1; // A node with larger rho value is "smaller"!
if (rho[u] < rho[v]) return 1;
return 0;
}
void CDF_FrankWolfeUpdateRates(int clique_size, unsigned *p_cknodes, double *p_alpha) {
// Water-filling (alternative update rule, kept for reference but disabled):
/*for (unsigned i = clique_size; i > 0; --i)
for (unsigned j = 0; j + 1 < i; ++j)
if (rho[cknodes[j]] > rho[cknodes[j + 1]]) {
unsigned tmp = cknodes[j];
cknodes[j] = cknodes[j + 1];
cknodes[j + 1] = tmp;
}
double budget = 1.0;
for (unsigned i = 0; i < clique_size; ++i) {
double val = budget / (i + 1);
if (i + 1 < clique_size && (rho[cknodes[i + 1]] - rho[cknodes[i]]) * (i + 1) < budget)
val = rho[cknodes[i + 1]] - rho[cknodes[i]];
for (unsigned j = 0; j <= i; ++j) {
#pragma omp atomic
rho[cknodes[j]] += val;
}
budget -= val * (i + 1);
}*/
// Frank-Wolfe step: the clique's unit weight goes entirely to its member
// with the minimum rho value (the minimizer of the linearized objective).
unsigned node_index = 0;
for (unsigned i = 1; i < clique_size; ++i) {
if (rho[p_cknodes[node_index]] > rho[p_cknodes[i]])
node_index = i;
}
#pragma omp atomic
rho[p_cknodes[node_index]] += 1.0;
#pragma omp atomic
p_alpha[node_index] += 1.0;
}
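/* Example (illustrative): for a triangle {a, b, c} with rho = {2.0, 0.5, 1.0},
 * the update above sends the whole unit of weight to b, giving rho = {2.0, 1.5, 1.0}.
 * Iterated over all cliques for many rounds, rho / num_iter roughly approaches
 * the clique-density-friendly decomposition values. */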
void CDF_PavaPreprocessUpdateRates(int clique_size, unsigned *cknodes) {
unsigned node_getting_weight = cknodes[0];
for (unsigned l = 1; l < clique_size; ++l)
if (level[cknodes[l]] > level[node_getting_weight])
node_getting_weight = cknodes[l];
#pragma omp atomic
rho_tentative[level[node_getting_weight]] += 1.0;
}
void CDF_CliqueScan(Graph *g, unsigned char k, task_t task) {
#pragma omp parallel for
for (unsigned long long i = 0; i < cnt_clique; ++i) {
// for (unsigned j = 0; j < k; ++j)
// cknodes[j] = ck[i * k + j];
switch (task) {
case FRANK_WOLFE: {
CDF_FrankWolfeUpdateRates(k, ck + i * k, alpha + i * k);
break;
}
case PAVA_PREPROCESS: {
CDF_PavaPreprocessUpdateRates(k, ck + i * k);
break;
}
}
}
}
void CDF_CliqueEnumThread(Subgraph *sg,
unsigned char clique_size,
unsigned char l) {
if (clique_size == 3) {
for (unsigned i = 0; i < sg->n[1]; ++i) {
unsigned u = sg->nodes[1][i];
cknodes[0] = id_sg2g[u];
#pragma omp critical
{
if ((unsigned long long)(p_ckend - ck) + clique_size > (unsigned long long)MAX_CLIQUES * clique_size) {
MAX_CLIQUES *= 2;
unsigned long long offset = p_ckend - ck; // cnt_clique is reduction-private here, so track the shared write position instead
ck = (unsigned *)realloc(ck, (unsigned long long)MAX_CLIQUES * clique_size * sizeof(unsigned));
p_ckend = ck + offset;
}
for (unsigned j = 0; j < clique_size; ++j)
*(p_ckend++) = cknodes[j];
++cnt_clique;
}
}
return;
}
if (l == 2) {
for (unsigned i = 0; i < sg->n[2]; ++i) {
unsigned u = sg->nodes[2][i];
cknodes[1] = id_sg2g[u];
for (unsigned j = u * sg->core, end = u * sg->core + sg->d[2][u]; j < end; ++j) {
unsigned v = sg->adj[j];
cknodes[0] = id_sg2g[v];
#pragma omp critical
{
if ((unsigned long long)(p_ckend - ck) + clique_size > (unsigned long long)MAX_CLIQUES * clique_size) {
MAX_CLIQUES *= 2;
unsigned long long offset = p_ckend - ck; // cnt_clique is reduction-private here, so track the shared write position instead
ck = (unsigned *)realloc(ck, (unsigned long long)MAX_CLIQUES * clique_size * sizeof(unsigned));
p_ckend = ck + offset;
}
for (unsigned k = 0; k < clique_size; ++k)
*(p_ckend++) = cknodes[k];
++cnt_clique;
}
}
}
return;
}
for (unsigned i = 0; i < sg->n[l]; ++i) { // Enumerate in reverse degeneracy order: after the relabeling, increasing subgraph ids traverse that order backwards
unsigned u = sg->nodes[l][i];
cknodes[l - 1] = id_sg2g[u];
sg->n[l - 1] = 0;
unsigned end = u * sg->core + sg->d[l][u];
for (unsigned j = u * sg->core; j < end; ++j) { // Relabel nodes and forming U'.
unsigned v = sg->adj[j];
if (sg->label[v] == l) {
sg->label[v] = l - 1;
sg->nodes[l - 1][sg->n[l - 1]++] = v;
sg->d[l - 1][v] = 0; // New degrees
}
}
for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Reorder adjacency list and compute new degrees
unsigned v = sg->nodes[l - 1][j];
for (unsigned k = sg->core * v, end = sg->core * v + sg->d[l][v]; k < end; ++k) {
unsigned w = sg->adj[k];
if (sg->label[w] == l - 1) {
++sg->d[l - 1][v];
}
else{
sg->adj[k--] = sg->adj[--end];
sg->adj[end] = w;
}
}
qsort(sg->adj + sg->core * v, sg->d[l - 1][v], sizeof(unsigned), UnsignedCmp); // Sort the truncated neighbour list by subgraph id (ascending = reverse degeneracy order)
}
CDF_CliqueEnumThread(sg, clique_size, l - 1);
for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Restore labels
unsigned v = sg->nodes[l - 1][j];
sg->label[v] = l;
}
}
}
void CDF_CliqueEnum(Graph *g, unsigned char k) {
Subgraph *sg;
cnt_clique = 0;
p_ckend = ck = (unsigned *)malloc((unsigned long long)MAX_CLIQUES * k * sizeof(unsigned)); // 64-bit arithmetic to avoid overflow of the size computation
#pragma omp parallel private(sg) reduction(+: cnt_clique)
{
cknodes = (unsigned *)malloc(k * sizeof(unsigned));
sg = AllocSubgraph(g, k);
#pragma omp for schedule(dynamic, 1) nowait
for(unsigned i = 0; i < g->e; ++i) {
cknodes[k - 1] = g->edges[i].s;
cknodes[k - 2] = g->edges[i].t;
MakeSubgraph(g, g->edges[i].s, g->edges[i].t, sg, k);
CDF_CliqueEnumThread(sg, k, k - 2);
}
FreeSubgraph(sg, k);
}
ck = (unsigned *)realloc(ck, cnt_clique * k * sizeof(unsigned));
alpha = (double *)malloc(cnt_clique * k * sizeof(double));
}
static int CDF_NodeCmp(const void *a, const void *b) {
double d = rho[*(const unsigned *)a] - rho[*(const unsigned *)b];
if (d > 0) return -1;
if (d < 0) return 1;
return 0;
}
typedef struct {
unsigned n; // Total number of aggregated points
unsigned *nag; // nag[i]: number of points aggregated in i
double *val; // val[i]: value of the aggregated points
// double *ub;
} IsotonicRegression;
// Pool Adjacent Violators Algorithm. Values to fit are stored in vect and n is the size of vect.
IsotonicRegression *CDF_Pava(double *vect, unsigned n) {
IsotonicRegression *fit = (IsotonicRegression *)malloc(sizeof(IsotonicRegression));
unsigned *nag = (unsigned *)malloc(n * sizeof(unsigned));
double *val = (double *)malloc(n * sizeof(double));
nag[0] = 1;
val[0] = vect[0];
unsigned j = 0;
for (unsigned i = 1; i < n; ++i) {
j += 1;
val[j] = vect[i];
nag[j] = 1;
while (j > 0 && val[j] >= val[j - 1] * 0.999999) {
val[j - 1] = (nag[j] * val[j] + nag[j - 1] * val[j - 1]) / (nag[j] + nag[j - 1]);
nag[j - 1] += nag[j];
--j;
}
}
fit->n = j + 1;
fit->nag = nag;
fit->val = val;
return fit;
}
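/* Illustrative sketch (never called): what CDF_Pava computes on a tiny input.
 * {3.0, 1.0, 2.0, 0.5} violates "non-increasing" at index 2, so PAVA pools the
 * violators {1.0, 2.0} into one block of average value 1.5, producing the step
 * function {3.0}, {1.5, 1.5}, {0.5}, i.e. fit->n == 3 blocks. */
static void __attribute__((unused)) CDF_PavaExample(void) {
  double vect[4] = {3.0, 1.0, 2.0, 0.5};
  IsotonicRegression *fit = CDF_Pava(vect, 4);
  for (unsigned i = 0; i < fit->n; ++i)
    fprintf(stderr, "block %u: %u point(s), value %f\n", i, fit->nag[i], fit->val[i]);
  free(fit->nag);
  free(fit->val);
  free(fit);
}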
IsotonicRegression *CDF_PavaPreprocess(Graph *g, unsigned char k) {
for (unsigned i = 0; i < g->n; ++i)
reordered[i] = i;
qsort(reordered, g->n, sizeof(unsigned), CDF_NodeCmp); // Reorder the nodes by decreasing rho values
for (unsigned i = 0; i < g->n; ++i)
level[reordered[i]] = i;
CDF_CliqueScan(g, k, PAVA_PREPROCESS);
IsotonicRegression *partition = CDF_Pava(rho_tentative, g->n);
for (unsigned j = 0, i = 0; j < partition->n; ++j)
for (unsigned l = 0; l < partition->nag[j]; ++l, ++i)
level[reordered[i]] = j;
return partition;
}
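/* Stability test. First reroute each clique's accumulated weight (alpha) from
 * lower-level members onto its highest-level members, splitting it equally;
 * then the first subset_size nodes (in the current rho ordering) form a stable
 * candidate iff their minimum rho still exceeds the maximum rho of all the
 * remaining nodes (up to a 1e-6 relative tolerance). Only a stable candidate
 * is handed to the exact optimality tests. */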
bool CDF_CheckStability(Graph *g, unsigned subset_size, const unsigned char k) {
for (unsigned i = 0; i < cnt_clique; ++i) {
unsigned max_level = 0, max_level_cnt = 0;
for (unsigned j = 0; j < k; ++j) {
if (level[ck[i * k + j]] > max_level) {
max_level = level[ck[i * k + j]];
max_level_cnt = 1;
} else if (level[ck[i * k + j]] == max_level) {
++max_level_cnt;
}
}
double sum = 0;
for (unsigned j = 0; j < k; ++j) {
if (level[ck[i * k + j]] < max_level) {
sum += alpha[i * k + j];
#pragma omp atomic
rho[ck[i * k + j]] -= alpha[i * k + j];
alpha[i * k + j] = 0;
}
}
for (unsigned j = 0; j < k; ++j) {
if (level[ck[i * k + j]] == max_level) {
#pragma omp atomic
rho[ck[i * k + j]] += sum / max_level_cnt;
alpha[i * k + j] += sum / max_level_cnt;
}
}
}
double prefix_min_rho = rho[reordered[0]];
double suffix_max_rho = -1;
for (unsigned i = 1; i < subset_size; ++i)
if (prefix_min_rho > rho[reordered[i]])
prefix_min_rho = rho[reordered[i]];
for (unsigned i = g->n - 1; i >= subset_size; --i)
if (suffix_max_rho < rho[reordered[i]])
suffix_max_rho = rho[reordered[i]];
return prefix_min_rho * 0.999999 > suffix_max_rho;
}
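/* Optimality test A ("Goldberg"): the rho values upper-bound how much k-clique
 * weight any prefix of nodes can absorb, so the candidate of density m/n is
 * exactly densest if no prefix of j nodes has average rho exceeding m/n by the
 * integrality margins checked below. Cheaper than the max-flow test, so it is
 * tried first. */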
bool CDF_CheckDensestGoldberg(Graph *g, const unsigned n, const unsigned char clique_size) {
qsort(reordered, n, sizeof(unsigned), CDF_NodeCmp); // Reorder the nodes by decreasing rho values
unsigned long long m = 0;
for (unsigned j = 0; j < n; ++j)
m += (unsigned long long)(rho_tentative[j] + 0.5);
double sum_rho = 0;
double jck = 0; // j choose k
bool skip = true;
for (unsigned j = 1; j < n; ++j) {
sum_rho += rho[reordered[j - 1]];
if (skip) {
if (j == clique_size)
jck = 1;
else if (j > clique_size)
jck = (jck * j) / (j - clique_size);
if (jck / j > (double)m / (double)n)
skip = false, fprintf(stderr, "Jump to j = %u\n", j);
else
continue;
}
double ub = sum_rho / j;
if (ub - (double)m / (double)n >= 1.0 / n / j &&
ub - (double)m / (double)n >= (ceil((double)m * j / n) - (double)m * j / n) / j) {
return false;
}
}
return true;
}
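/* Optimality test B via max flow. The network built below has
 *   s -> c  (capacity n) for each of the m k-cliques c fully inside the candidate set,
 *   c -> u  (capacity n) for each of the k member nodes u of clique c,
 *   u -> t  (capacity m) for each of the n candidate nodes u.
 * The test passes iff the max flow saturates every source edge, i.e. equals m * n. */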
bool CDF_CheckDensestMaxFlow(Graph *g, const unsigned n, const unsigned char clique_size, unsigned *p_cnt_max_flow) {
++(*p_cnt_max_flow);
Network network;
unsigned *id_in_network = (unsigned *)malloc(g->n * sizeof(unsigned));
unsigned long long m = 0;
vector<Network::Vertex> R;
Network::Vertex s = network.AddVertex(), t = network.AddVertex();
for (unsigned i = 0; i < g->n; ++i)
id_in_network[i] = n;
for (unsigned i = 0; i < n; ++i) {
id_in_network[reordered[i]] = i;
R.push_back(network.AddVertex());
}
for (unsigned i = 0; i < cnt_clique; ++i) {
bool flag = true;
for (unsigned j = 0; j < clique_size; ++j) {
if (id_in_network[ck[i * clique_size + j]] >= n) {
flag = false;
break;
}
}
if (flag) {
++m;
Network::Vertex v = network.AddVertex();
for (unsigned j = 0; j < clique_size; ++j)
network.AddEdge(v, R[id_in_network[ck[i * clique_size + j]]], n);
network.AddEdge(s, v, n);
}
}
for (unsigned i = 0; i < n; ++i)
network.AddEdge(R[i], t, m);
free(id_in_network);
return network.MaxFlow(s, t) >= m * n;
}
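/* Fisher-Yates shuffle over whole cliques (k consecutive entries of ck[] each),
 * so that cliques sharing a node are spread across the array rather than
 * processed in a batch (see the header comment). Runs sequentially before the
 * parallel phase, so the non-thread-safe rand() is fine here. */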
void ShuffleCliques(const unsigned k) {
for (unsigned i = 1; i < cnt_clique; ++i) {
unsigned rand_index = rand() % (i + 1);
for (unsigned j = 0; j < k; ++j) {
unsigned temp = ck[i * k + j];
ck[i * k + j] = ck[rand_index * k + j];
ck[rand_index * k + j] = temp;
}
}
}
void CDF_Main(const unsigned char k, Graph *g, FILE *ofp, time_t t0) {
unsigned cnt_max_flow = 0;
fprintf(ofp, "[Number of Iterations]\t[Number of Nodes]\t[k-Clique Density]\t[Time (seconds)]\t[Info]\n");
AllocCdf(g, k);
if (k >= 3) {
CDF_CliqueEnum(g, k); // Collect all k-cliques
} else {
ck = (unsigned *)malloc((unsigned long long)(g->e) * k * sizeof(unsigned));
alpha = (double *)malloc((unsigned long long)(g->e) * k * sizeof(double));
cnt_clique = g->e;
for (unsigned long long i = 0; i < (unsigned long long)(g->e); ++i) {
ck[i << 1] = g->edges[i].s;
ck[(i << 1) + 1] = g->edges[i].t;
}
}
ShuffleCliques(k);
for (unsigned i = 0; i < g->n; ++i)
rho[i] = 0;
for (unsigned long long i = 0; i < cnt_clique * k; ++i)
alpha[i] = 0;
for (unsigned num_iter = 1; ; num_iter <<= 1) {
fprintf(stderr, "Start: number of iterations = %u.\n", num_iter);
// Step 1: run the Frank-Wolfe based algorithm for num_iter rounds
for (unsigned t = num_iter / 2 + 1; t <= num_iter; ++t) {
if (t % 10 == 0)
fprintf(stderr, "Run round %u...\n", t);
CDF_CliqueScan(g, k, FRANK_WOLFE);
}
// Step 2: give a tentative decomposition
for (unsigned i = 0; i < g->n; ++i)
rho_tentative[i] = 0;
IsotonicRegression *partition = CDF_PavaPreprocess(g, k);
fprintf(stderr, "Approximate densest subgraph: %u nodes, density = %f.\n", partition->nag[0], partition->val[0]);
/* FILE *ofp = fopen("rates.txt", "w");
for (unsigned i = 0; i < g->n; ++i)
fprintf(ofp, "r[%u] = %.12f\n", reordered[i], rho[reordered[i]]);
fclose(ofp);*/
// Step 3: Check stability and optimality
if (CDF_CheckStability(g, partition->nag[0], k)) {
fprintf(stderr, "The potential densest set is stable!\n");
if (CDF_CheckDensestGoldberg(g, partition->nag[0], k)) {
fprintf(stderr, "The first %u nodes forms a densest subgraph by criteria A!\n", partition->nag[0]);
fprintf(ofp, "[Number of Iterations]\t[Stopping Condition]\t[Number of Nodes]\t[Number of Edges]\t[k-Clique Density]\t[Number of Max-Flow Calls]\t[Time (seconds)]\n");
fprintf(ofp, "%u\tGoldberg\t%u\t%u\t%.12f\t%u\t", num_iter, partition->nag[0], CountEdges(g, partition->nag[0], reordered), partition->val[0], cnt_max_flow);
break;
}
else if (CDF_CheckDensestMaxFlow(g, partition->nag[0], k, &cnt_max_flow)) {
fprintf(stderr, "The first %u nodes forms a densest subgraph by criteria B!\n", partition->nag[0]);
fprintf(ofp, "[Number of Iterations]\t[Stopping Condition]\t[Number of Nodes]\t[Number of Edges]\t[k-Clique Density]\t[Number of Max-Flow Calls]\t[Time (seconds)]\n");
fprintf(ofp, "%u\tMax Flow\t%u\t%u\t%.12f\t%u\t", num_iter, partition->nag[0], CountEdges(g, partition->nag[0], reordered), partition->val[0], cnt_max_flow);
break;
}
else {
fprintf(stderr, "Cannot guarantee it is densest by either criteria A or criteria B.\n");
fprintf(ofp, "%u\t%u\t%.12f\t%ld\tSTABLE BUT NOT DENSEST\n", num_iter, partition->nag[0], partition->val[0], time(NULL) - t0);
}
} else {
fprintf(stderr, "The potential densest subset is not stable!\n");
fprintf(ofp, "%u\t%u\t%.12f\t%ld\tNOT STABLE\n", num_iter, partition->nag[0], partition->val[0], time(NULL) - t0);
}
/* ofp = fopen("rates_rerun.txt", "w");
for (int i = 0; i < partition->nag[0]; ++i)
fprintf(ofp, "r[%u] = %.12f\n", reordered[i], rho[reordered[i]]);
fclose(ofp);*/
}
}
int main(int argc, char **argv) {
EdgeList *el;
Graph *g;
if (argc < 5) {
fprintf(stderr, "Usage: %s p k edgeListFileName tag\n", argv[0]);
return 1;
}
unsigned num_threads = atoi(argv[1]);
unsigned char k = atoi(argv[2]);
char *file_name = argv[3];
omp_set_num_threads(num_threads);
time_t t0, t1, t2;
t0 = t1 = time(NULL);
printf("Reading edgelist from file %s\n", file_name);
el = ReadEdgeList(file_name);
printf("Number of nodes = %u\n", el->n);
printf("Number of edges = %u\n", el->e);
t2 = time(NULL);
printf("- Time = %ldh%ldm%lds\n",(t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
t1 = t2;
printf("Building the graph structure\n");
SortByCore(el); // Do core decomposition and render degeneracy ordering to the nodes
Relabel(el);
g = MakeGraph(el);
printf("Number of nodes (degree > 0) = %u\n", g->n);
t2 = time(NULL);
printf("- Time = %ldh%ldm%lds\n", (t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
t1 = t2;
printf("Iterate over all cliques\n");
char output_file_name[256];
snprintf(output_file_name, sizeof(output_file_name), "stat_exact_%s_%s_%s.txt", argv[4], argv[1], argv[2]); // bounded, unlike a strcat chain
FILE *ofp = fopen(output_file_name, "w");
try {
CDF_Main(k, g, ofp, t0);
} catch(std::exception &e) {
fprintf(ofp, "%s\n", e.what());
}
printf("Number of %u-cliques: %llu\n", k, cnt_clique);
t2 = time(NULL);
printf("- Time = %ldh%ldm%lds\n", (t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
t1 = t2;
FreeGraph(g);
printf("- Overall time = %ldh%ldm%lds\n", (t2 - t0) / 3600, ((t2 - t0) % 3600) / 60, ((t2 - t0) % 60));
fprintf(ofp, "%ld\n", t2 - t0);
fclose(ofp);
return 0;
}
|
analyse.c | /*****************************************************************************
* analyse.c: h264 encoder library
*****************************************************************************
* Copyright (C) 2003-2008 x264 project
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
* Loren Merritt <lorenm@u.washington.edu>
* Jason Garrett-Glaser <darkshikari@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include <math.h>
#include <omp.h>
#include <limits.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include "common/common.h"
#include "macroblock.h"
#include "me.h"
#include "ratecontrol.h"
#include "analyse.h"
#include "rdo.c"
typedef struct
{
/* 16x16 */
int i_ref;
int i_rd16x16;
x264_me_t me16x16;
/* 8x8 */
int i_cost8x8;
/* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3] */
DECLARE_ALIGNED_4( int16_t mvc[32][5][2] );
x264_me_t me8x8[4];
/* Sub 4x4 */
int i_cost4x4[4]; /* cost per 8x8 partition */
x264_me_t me4x4[4][4];
/* Sub 8x4 */
int i_cost8x4[4]; /* cost per 8x8 partition */
x264_me_t me8x4[4][2];
/* Sub 4x8 */
int i_cost4x8[4]; /* cost per 8x8 partition */
x264_me_t me4x8[4][2];
/* 16x8 */
int i_cost16x8;
x264_me_t me16x8[2];
/* 8x16 */
int i_cost8x16;
x264_me_t me8x16[2];
} x264_mb_analysis_list_t;
typedef struct
{
/* conduct the analysis using this lambda and QP */
int i_lambda;
int i_lambda2;
int i_qp;
int16_t *p_cost_mv;
int i_mbrd;
/* I: Intra part */
/* Take some shortcuts in intra search if intra is deemed unlikely */
int b_fast_intra;
int b_try_pskip;
/* Luma part */
int i_satd_i16x16;
int i_satd_i16x16_dir[7];
int i_predict16x16;
int i_satd_i8x8;
int i_satd_i8x8_dir[12][4];
int i_predict8x8[4];
int i_satd_i4x4;
int i_predict4x4[16];
int i_satd_pcm;
/* Chroma part */
int i_satd_i8x8chroma;
int i_satd_i8x8chroma_dir[4];
int i_predict8x8chroma;
/* II: Inter part P/B frame */
x264_mb_analysis_list_t l0;
x264_mb_analysis_list_t l1;
int i_cost16x16bi; /* uses the same ref and mv as l0 and l1 (at least for now) */
int i_cost16x16direct;
int i_cost8x8bi;
int i_cost8x8direct[4];
int i_cost16x8bi;
int i_cost8x16bi;
int i_rd16x16bi;
int i_rd16x16direct;
int i_rd16x8bi;
int i_rd8x16bi;
int i_rd8x8bi;
int i_mb_partition16x8[2]; /* mb_partition_e */
int i_mb_partition8x16[2];
int i_mb_type16x8; /* mb_class_e */
int i_mb_type8x16;
int b_direct_available;
} x264_mb_analysis_t;
/* lambda = pow(2,qp/6-2) */
const int x264_lambda_tab[52] = {
1, 1, 1, 1, 1, 1, 1, 1, /* 0-7 */
1, 1, 1, 1, /* 8-11 */
1, 1, 1, 1, 2, 2, 2, 2, /* 12-19 */
3, 3, 3, 4, 4, 4, 5, 6, /* 20-27 */
6, 7, 8, 9,10,11,13,14, /* 28-35 */
16,18,20,23,25,29,32,36, /* 36-43 */
40,45,51,57,64,72,81,91 /* 44-51 */
};
/* lambda2 = pow(lambda,2) * .9 * 256 */
const int x264_lambda2_tab[52] = {
14, 18, 22, 28, 36, 45, 57, 72, /* 0 - 7 */
91, 115, 145, 182, 230, 290, 365, 460, /* 8 - 15 */
580, 731, 921, 1161, 1462, 1843, 2322, 2925, /* 16 - 23 */
3686, 4644, 5851, 7372, 9289, 11703, 14745, 18578, /* 24 - 31 */
23407, 29491, 37156, 46814, 58982, 74313, 93628, 117964, /* 32 - 39 */
148626, 187257, 235929, 297252, 374514, 471859, 594505, 749029, /* 40 - 47 */
943718, 1189010, 1498059, 1887436 /* 48 - 51 */
};
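/* Illustrative sketch (not part of the encoder): the two tables above can be
 * regenerated from the closed forms in their comments, using the continuous
 * lambda (not the rounded table entry) when squaring for lambda2. */
#if 0
static void x264_regen_lambda_tabs( void )
{
    int qp;
    for( qp = 0; qp < 52; qp++ )
    {
        double lf = pow( 2.0, qp/6.0 - 2.0 );
        int lambda  = X264_MAX( 1, (int)(lf + 0.5) ); /* rounded, floored at 1 */
        int lambda2 = (int)(lf * lf * 0.9 * 256);     /* truncated */
        printf( "qp=%2d lambda=%3d lambda2=%7d\n", qp, lambda, lambda2 );
    }
}
#endif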
/* TODO: calculate CABAC costs */
static const int i_mb_b_cost_table[X264_MBTYPE_MAX] = {
9, 9, 9, 9, 0, 0, 0, 1, 3, 7, 7, 7, 3, 7, 7, 7, 5, 9, 0
};
static const int i_mb_b16x8_cost_table[17] = {
0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 7, 5, 7, 9, 9, 9
};
static const int i_sub_mb_b_cost_table[13] = {
7, 5, 5, 3, 7, 5, 7, 3, 7, 7, 7, 5, 1
};
static const int i_sub_mb_p_cost_table[4] = {
5, 3, 3, 1
};
static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a );
uint16_t *x264_cost_mv_fpel[52][4];
/* initialize an array of lambda*nbits for all possible mvs */
static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
{
static int16_t *p_cost_mv[52];
int i, j;
if( !p_cost_mv[a->i_qp] )
{
/* could be faster, but isn't called many times */
/* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
p_cost_mv[a->i_qp] = x264_malloc( (4*4*2048 + 1) * sizeof(int16_t) );
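/* Recenter the pointer so the table can be indexed directly by signed mv deltas in [-2*4*2048, +2*4*2048]. */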
p_cost_mv[a->i_qp] += 2*4*2048;
for( i = 0; i <= 2*4*2048; i++ )
{
p_cost_mv[a->i_qp][-i] =
p_cost_mv[a->i_qp][i] = a->i_lambda * bs_size_se( i );
}
}
a->p_cost_mv = p_cost_mv[a->i_qp];
/* FIXME is this useful for all me methods? */
if( h->param.analyse.i_me_method >= X264_ME_ESA && !x264_cost_mv_fpel[a->i_qp][0] )
{
for( j=0; j<4; j++ )
{
x264_cost_mv_fpel[a->i_qp][j] = x264_malloc( (4*2048 + 1) * sizeof(int16_t) );
x264_cost_mv_fpel[a->i_qp][j] += 2*2048;
for( i = -2*2048; i < 2*2048; i++ )
x264_cost_mv_fpel[a->i_qp][j][i] = p_cost_mv[a->i_qp][i*4+j];
}
}
}
static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
{
int i = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B);
/* mbrd == 1 -> RD mode decision */
/* mbrd == 2 -> RD refinement */
a->i_mbrd = (i>=6) + (i>=8);
/* conduct the analysis using this lambda and QP */
a->i_qp = h->mb.i_qp = i_qp;
h->mb.i_chroma_qp = h->chroma_qp_table[i_qp];
a->i_lambda = x264_lambda_tab[i_qp];
a->i_lambda2 = x264_lambda2_tab[i_qp];
h->mb.i_me_method = h->param.analyse.i_me_method;
h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
&& h->mb.i_subpel_refine >= 5;
h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->i_mbrd;
h->mb.b_transform_8x8 = 0;
h->mb.b_noise_reduction = 0;
/* I: Intra part */
a->i_satd_i16x16 =
a->i_satd_i8x8 =
a->i_satd_i4x4 =
a->i_satd_i8x8chroma = COST_MAX;
/* non-RD PCM decision is inaccurate (as is psy-rd), so don't do it */
a->i_satd_pcm = !h->mb.i_psy_rd && a->i_mbrd ? ((uint64_t)X264_PCM_COST*a->i_lambda2 + 128) >> 8 : COST_MAX;
a->b_fast_intra = 0;
h->mb.i_skip_intra =
h->mb.b_lossless ? 0 :
a->i_mbrd ? 2 :
!h->param.analyse.i_trellis && !h->param.analyse.i_noise_reduction;
/* II: Inter part P/B frame */
if( h->sh.i_type != SLICE_TYPE_I )
{
int i, j;
int i_fmv_range = 4 * h->param.analyse.i_mv_range;
// limit motion search to a slightly smaller range than the theoretical limit,
// since the search may go a few iterations past its given range
int i_fpel_border = 5; // umh unconditional radius
int i_spel_border = 8; // 1.5 for subpel_satd, 1.5 for subpel_rd, 2 for bime, round up
/* Calculate max allowed MV range */
#define CLIP_FMV(mv) x264_clip3( mv, -i_fmv_range, i_fmv_range-1 )
h->mb.mv_min[0] = 4*( -16*h->mb.i_mb_x - 24 );
h->mb.mv_max[0] = 4*( 16*( h->sps->i_mb_width - h->mb.i_mb_x - 1 ) + 24 );
h->mb.mv_min_spel[0] = CLIP_FMV( h->mb.mv_min[0] );
h->mb.mv_max_spel[0] = CLIP_FMV( h->mb.mv_max[0] );
h->mb.mv_min_fpel[0] = (h->mb.mv_min_spel[0]>>2) + i_fpel_border;
h->mb.mv_max_fpel[0] = (h->mb.mv_max_spel[0]>>2) - i_fpel_border;
if( h->mb.i_mb_x == 0)
{
int mb_y = h->mb.i_mb_y >> h->sh.b_mbaff;
int mb_height = h->sps->i_mb_height >> h->sh.b_mbaff;
int thread_mvy_range = i_fmv_range;
if( h->param.i_threads > 1 )
{
int pix_y = (h->mb.i_mb_y | h->mb.b_interlaced) * 16;
int thresh = pix_y + h->param.analyse.i_mv_range_thread;
for( i = (h->sh.i_type == SLICE_TYPE_B); i >= 0; i-- )
{
x264_frame_t **fref = i ? h->fref1 : h->fref0;
int i_ref = i ? h->i_ref1 : h->i_ref0;
for( j=0; j<i_ref; j++ )
{
while( fref[j]->i_lines_completed < thresh )
{
#pragma omp taskwait
}
}
}
if( h->param.b_deterministic )
thread_mvy_range = h->param.analyse.i_mv_range_thread;
if( h->mb.b_interlaced )
thread_mvy_range >>= 1;
}
h->mb.mv_min[1] = 4*( -16*mb_y - 24 );
h->mb.mv_max[1] = 4*( 16*( mb_height - mb_y - 1 ) + 24 );
h->mb.mv_min_spel[1] = x264_clip3( h->mb.mv_min[1], X264_MAX(4*(-512+i_spel_border), -i_fmv_range), i_fmv_range );
h->mb.mv_max_spel[1] = CLIP_FMV( h->mb.mv_max[1] );
h->mb.mv_max_spel[1] = X264_MIN( h->mb.mv_max_spel[1], thread_mvy_range*4 );
h->mb.mv_min_fpel[1] = (h->mb.mv_min_spel[1]>>2) + i_fpel_border;
h->mb.mv_max_fpel[1] = (h->mb.mv_max_spel[1]>>2) - i_fpel_border;
}
#undef CLIP_FMV
a->l0.me16x16.cost =
a->l0.i_rd16x16 =
a->l0.i_cost8x8 = COST_MAX;
for( i = 0; i < 4; i++ )
{
a->l0.i_cost4x4[i] =
a->l0.i_cost8x4[i] =
a->l0.i_cost4x8[i] = COST_MAX;
}
a->l0.i_cost16x8 =
a->l0.i_cost8x16 = COST_MAX;
if( h->sh.i_type == SLICE_TYPE_B )
{
a->l1.me16x16.cost =
a->l1.i_rd16x16 =
a->l1.i_cost8x8 = COST_MAX;
for( i = 0; i < 4; i++ )
{
a->l1.i_cost4x4[i] =
a->l1.i_cost8x4[i] =
a->l1.i_cost4x8[i] =
a->i_cost8x8direct[i] = COST_MAX;
}
a->l1.i_cost16x8 =
a->l1.i_cost8x16 =
a->i_rd16x16bi =
a->i_rd16x16direct =
a->i_rd8x8bi =
a->i_rd16x8bi =
a->i_rd8x16bi =
a->i_cost16x16bi =
a->i_cost16x16direct =
a->i_cost8x8bi =
a->i_cost16x8bi =
a->i_cost8x16bi = COST_MAX;
}
/* Fast intra decision */
if( h->mb.i_mb_xy - h->sh.i_first_mb > 4 )
{
if( IS_INTRA( h->mb.i_mb_type_left )
|| IS_INTRA( h->mb.i_mb_type_top )
|| IS_INTRA( h->mb.i_mb_type_topleft )
|| IS_INTRA( h->mb.i_mb_type_topright )
|| (h->sh.i_type == SLICE_TYPE_P && IS_INTRA( h->fref0[0]->mb_type[h->mb.i_mb_xy] ))
|| (h->mb.i_mb_xy - h->sh.i_first_mb < 3*(h->stat.frame.i_mb_count[I_4x4] + h->stat.frame.i_mb_count[I_8x8] + h->stat.frame.i_mb_count[I_16x16])) )
{ /* intra is likely */ }
else
{
a->b_fast_intra = 1;
}
}
h->mb.b_skip_mc = 0;
}
}
/*
* Handle intra mb
*/
/* Max = 4 */
static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
if( i_neighbour & MB_TOPLEFT )
{
/* top and left available */
*mode++ = I_PRED_16x16_V;
*mode++ = I_PRED_16x16_H;
*mode++ = I_PRED_16x16_DC;
*mode++ = I_PRED_16x16_P;
*pi_count = 4;
}
else if( i_neighbour & MB_LEFT )
{
/* left available*/
*mode++ = I_PRED_16x16_DC_LEFT;
*mode++ = I_PRED_16x16_H;
*pi_count = 2;
}
else if( i_neighbour & MB_TOP )
{
/* top available*/
*mode++ = I_PRED_16x16_DC_TOP;
*mode++ = I_PRED_16x16_V;
*pi_count = 2;
}
else
{
/* none available */
*mode = I_PRED_16x16_DC_128;
*pi_count = 1;
}
}
/* Max = 4 */
static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
if( i_neighbour & MB_TOPLEFT )
{
/* top and left available */
*mode++ = I_PRED_CHROMA_V;
*mode++ = I_PRED_CHROMA_H;
*mode++ = I_PRED_CHROMA_DC;
*mode++ = I_PRED_CHROMA_P;
*pi_count = 4;
}
else if( i_neighbour & MB_LEFT )
{
/* left available*/
*mode++ = I_PRED_CHROMA_DC_LEFT;
*mode++ = I_PRED_CHROMA_H;
*pi_count = 2;
}
else if( i_neighbour & MB_TOP )
{
/* top available*/
*mode++ = I_PRED_CHROMA_DC_TOP;
*mode++ = I_PRED_CHROMA_V;
*pi_count = 2;
}
else
{
/* none available */
*mode = I_PRED_CHROMA_DC_128;
*pi_count = 1;
}
}
/* MAX = 9 */
static void predict_4x4_mode_available( unsigned int i_neighbour,
int *mode, int *pi_count )
{
int b_l = i_neighbour & MB_LEFT;
int b_t = i_neighbour & MB_TOP;
if( b_l && b_t )
{
*pi_count = 6;
*mode++ = I_PRED_4x4_DC;
*mode++ = I_PRED_4x4_H;
*mode++ = I_PRED_4x4_V;
*mode++ = I_PRED_4x4_DDL;
if( i_neighbour & MB_TOPLEFT )
{
*mode++ = I_PRED_4x4_DDR;
*mode++ = I_PRED_4x4_VR;
*mode++ = I_PRED_4x4_HD;
*pi_count += 3;
}
*mode++ = I_PRED_4x4_VL;
*mode++ = I_PRED_4x4_HU;
}
else if( b_l )
{
*mode++ = I_PRED_4x4_DC_LEFT;
*mode++ = I_PRED_4x4_H;
*mode++ = I_PRED_4x4_HU;
*pi_count = 3;
}
else if( b_t )
{
*mode++ = I_PRED_4x4_DC_TOP;
*mode++ = I_PRED_4x4_V;
*mode++ = I_PRED_4x4_DDL;
*mode++ = I_PRED_4x4_VL;
*pi_count = 4;
}
else
{
*mode++ = I_PRED_4x4_DC_128;
*pi_count = 1;
}
}
/* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */
static inline void x264_psy_trellis_init( x264_t *h, int do_both_dct )
{
DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
DECLARE_ALIGNED_16( uint8_t zero[16*FDEC_STRIDE] ) = {0};
int i;
if( do_both_dct || h->mb.b_transform_8x8 )
{
h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], zero );
for( i = 0; i < 4; i++ )
h->zigzagf.scan_8x8( h->mb.pic.fenc_dct8[i], dct8x8[i] );
}
if( do_both_dct || !h->mb.b_transform_8x8 )
{
h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], zero );
for( i = 0; i < 16; i++ )
h->zigzagf.scan_4x4( h->mb.pic.fenc_dct4[i], dct4x4[i] );
}
}
/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */
static inline void x264_mb_cache_fenc_satd( x264_t *h )
{
DECLARE_ALIGNED_16(uint8_t zero[16]) = {0};
uint8_t *fenc;
int x, y, satd_sum = 0, sa8d_sum = 0;
if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis )
x264_psy_trellis_init( h, h->param.analyse.b_transform_8x8 );
if( !h->mb.i_psy_rd )
return;
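/* satd(zero, fenc) counts the block's DC term; sad(zero, fenc) is the plain
 * pixel sum, i.e. that same DC term, so subtracting sad>>1 (sad>>2 for sa8d,
 * matching the transforms' normalisation) leaves an approximately AC-only
 * energy measure for psy-RD. */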
for( y = 0; y < 4; y++ )
for( x = 0; x < 4; x++ )
{
fenc = h->mb.pic.p_fenc[0]+x*4+y*4*FENC_STRIDE;
h->mb.pic.fenc_satd[y][x] = h->pixf.satd[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE )
- (h->pixf.sad[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE )>>1);
satd_sum += h->mb.pic.fenc_satd[y][x];
}
for( y = 0; y < 2; y++ )
for( x = 0; x < 2; x++ )
{
fenc = h->mb.pic.p_fenc[0]+x*8+y*8*FENC_STRIDE;
h->mb.pic.fenc_sa8d[y][x] = h->pixf.sa8d[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE )
- (h->pixf.sad[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE )>>2);
sa8d_sum += h->mb.pic.fenc_sa8d[y][x];
}
h->mb.pic.fenc_satd_sum = satd_sum;
h->mb.pic.fenc_sa8d_sum = sa8d_sum;
}
static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a )
{
int i;
int i_max;
int predict_mode[4];
uint8_t *p_dstc[2], *p_srcc[2];
if( a->i_satd_i8x8chroma < COST_MAX )
return;
/* 8x8 prediction selection for chroma */
p_dstc[0] = h->mb.pic.p_fdec[1];
p_dstc[1] = h->mb.pic.p_fdec[2];
p_srcc[0] = h->mb.pic.p_fenc[1];
p_srcc[1] = h->mb.pic.p_fenc[2];
predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
a->i_satd_i8x8chroma = COST_MAX;
if( i_max == 4 && h->pixf.intra_satd_x3_8x8c && h->pixf.mbcmp[0] == h->pixf.satd[0] )
{
int satdu[4], satdv[4];
h->pixf.intra_satd_x3_8x8c( p_srcc[0], p_dstc[0], satdu );
h->pixf.intra_satd_x3_8x8c( p_srcc[1], p_dstc[1], satdv );
h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[0] );
h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[1] );
satdu[I_PRED_CHROMA_P] =
h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE, p_srcc[0], FENC_STRIDE );
satdv[I_PRED_CHROMA_P] =
h->pixf.mbcmp[PIXEL_8x8]( p_dstc[1], FDEC_STRIDE, p_srcc[1], FENC_STRIDE );
for( i=0; i<i_max; i++ )
{
int i_mode = predict_mode[i];
int i_satd = satdu[i_mode] + satdv[i_mode]
+ a->i_lambda * bs_size_ue(i_mode);
a->i_satd_i8x8chroma_dir[i] = i_satd;
COPY2_IF_LT( a->i_satd_i8x8chroma, i_satd, a->i_predict8x8chroma, i_mode );
}
}
else
{
for( i=0; i<i_max; i++ )
{
int i_satd;
int i_mode = predict_mode[i];
/* we do the prediction */
if( h->mb.b_lossless )
x264_predict_lossless_8x8_chroma( h, i_mode );
else
{
h->predict_8x8c[i_mode]( p_dstc[0] );
h->predict_8x8c[i_mode]( p_dstc[1] );
}
/* we calculate the cost */
i_satd = h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE,
p_srcc[0], FENC_STRIDE ) +
h->pixf.mbcmp[PIXEL_8x8]( p_dstc[1], FDEC_STRIDE,
p_srcc[1], FENC_STRIDE ) +
a->i_lambda * bs_size_ue( x264_mb_pred_mode8x8c_fix[i_mode] );
a->i_satd_i8x8chroma_dir[i] = i_satd;
COPY2_IF_LT( a->i_satd_i8x8chroma, i_satd, a->i_predict8x8chroma, i_mode );
}
}
h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
}
static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
{
const unsigned int flags = h->sh.i_type == SLICE_TYPE_I ? h->param.analyse.intra : h->param.analyse.inter;
uint8_t *p_src = h->mb.pic.p_fenc[0];
uint8_t *p_dst = h->mb.pic.p_fdec[0];
int i, idx;
int i_max;
int predict_mode[9];
int b_merged_satd = !!h->pixf.intra_mbcmp_x3_16x16 && !h->mb.b_lossless;
/*---------------- Try all mode and calculate their score ---------------*/
/* 16x16 prediction selection */
predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
if( b_merged_satd && i_max == 4 )
{
h->pixf.intra_mbcmp_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir );
h->predict_16x16[I_PRED_16x16_P]( p_dst );
a->i_satd_i16x16_dir[I_PRED_16x16_P] =
h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
for( i=0; i<4; i++ )
{
int cost = a->i_satd_i16x16_dir[i] += a->i_lambda * bs_size_ue(i);
COPY2_IF_LT( a->i_satd_i16x16, cost, a->i_predict16x16, i );
}
}
else
{
for( i = 0; i < i_max; i++ )
{
int i_satd;
int i_mode = predict_mode[i];
if( h->mb.b_lossless )
x264_predict_lossless_16x16( h, i_mode );
else
h->predict_16x16[i_mode]( p_dst );
i_satd = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) +
a->i_lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] );
COPY2_IF_LT( a->i_satd_i16x16, i_satd, a->i_predict16x16, i_mode );
a->i_satd_i16x16_dir[i_mode] = i_satd;
}
}
if( h->sh.i_type == SLICE_TYPE_B )
/* cavlc mb type prefix */
a->i_satd_i16x16 += a->i_lambda * i_mb_b_cost_table[I_16x16];
if( a->b_fast_intra && a->i_satd_i16x16 > 2*i_satd_inter )
return;
/* 8x8 prediction selection */
if( flags & X264_ANALYSE_I8x8 )
{
DECLARE_ALIGNED_16( uint8_t edge[33] );
x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8];
int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
int i_cost = 0;
b_merged_satd = h->pixf.intra_sa8d_x3_8x8 && h->pixf.mbcmp[0] == h->pixf.satd[0];
// FIXME some bias like in i4x4?
if( h->sh.i_type == SLICE_TYPE_B )
i_cost += a->i_lambda * i_mb_b_cost_table[I_8x8];
for( idx = 0;; idx++ )
{
int x = idx&1;
int y = idx>>1;
uint8_t *p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
uint8_t *p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
int i_best = COST_MAX;
int i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max );
x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
if( b_merged_satd && i_max == 9 )
{
int satd[9];
h->pixf.intra_sa8d_x3_8x8( p_src_by, edge, satd );
satd[i_pred_mode] -= 3 * a->i_lambda;
for( i=2; i>=0; i-- )
{
int cost = a->i_satd_i8x8_dir[i][idx] = satd[i] + 4 * a->i_lambda;
COPY2_IF_LT( i_best, cost, a->i_predict8x8[idx], i );
}
i = 3;
}
else
i = 0;
for( ; i<i_max; i++ )
{
int i_satd;
int i_mode = predict_mode[i];
if( h->mb.b_lossless )
x264_predict_lossless_8x8( h, p_dst_by, idx, i_mode, edge );
else
h->predict_8x8[i_mode]( p_dst_by, edge );
i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE )
+ a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode );
a->i_satd_i8x8_dir[i_mode][idx] = i_satd;
}
i_cost += i_best;
if( idx == 3 || i_cost > i_satd_thresh )
break;
/* we need to encode this block now (for next ones) */
h->predict_8x8[a->i_predict8x8[idx]]( p_dst_by, edge );
x264_mb_encode_i8x8( h, idx, a->i_qp );
x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
}
if( idx == 3 )
{
a->i_satd_i8x8 = i_cost;
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.i8x8_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->mb.pic.i8x8_dct_buf, h->dct.luma8x8, sizeof(h->mb.pic.i8x8_dct_buf) );
}
}
else
{
a->i_satd_i8x8 = COST_MAX;
i_cost = i_cost * 4/(idx+1);
}
if( X264_MIN(i_cost, a->i_satd_i16x16) > i_satd_inter*(5+!!a->i_mbrd)/4 )
return;
}
/* 4x4 prediction selection */
if( flags & X264_ANALYSE_I4x4 )
{
int i_cost;
int i_satd_thresh = X264_MIN3( i_satd_inter, a->i_satd_i16x16, a->i_satd_i8x8 );
b_merged_satd = h->pixf.intra_satd_x3_4x4 && h->pixf.mbcmp[0] == h->pixf.satd[0];
if( a->i_mbrd )
i_satd_thresh = i_satd_thresh * (10-a->b_fast_intra)/8;
i_cost = a->i_lambda * 24; /* from JVT (SATD0) */
if( h->sh.i_type == SLICE_TYPE_B )
i_cost += a->i_lambda * i_mb_b_cost_table[I_4x4];
for( idx = 0;; idx++ )
{
uint8_t *p_src_by = p_src + block_idx_xy_fenc[idx];
uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx];
int i_best = COST_MAX;
int i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max );
if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples */
*(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
if( b_merged_satd && i_max >= 6 )
{
int satd[9];
h->pixf.intra_satd_x3_4x4( p_src_by, p_dst_by, satd );
satd[i_pred_mode] -= 3 * a->i_lambda;
for( i=2; i>=0; i-- )
COPY2_IF_LT( i_best, satd[i] + 4 * a->i_lambda,
a->i_predict4x4[idx], i );
i = 3;
}
else
i = 0;
for( ; i<i_max; i++ )
{
int i_satd;
int i_mode = predict_mode[i];
if( h->mb.b_lossless )
x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode );
else
h->predict_4x4[i_mode]( p_dst_by );
i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE,
p_src_by, FENC_STRIDE )
+ a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode );
}
i_cost += i_best;
if( i_cost > i_satd_thresh || idx == 15 )
break;
/* we need to encode this block now (for next ones) */
h->predict_4x4[a->i_predict4x4[idx]]( p_dst_by );
x264_mb_encode_i4x4( h, idx, a->i_qp );
h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
}
if( idx == 15 )
{
a->i_satd_i4x4 = i_cost;
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.i4x4_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->mb.pic.i4x4_dct_buf, h->dct.luma4x4, sizeof(h->mb.pic.i4x4_dct_buf) );
}
}
else
a->i_satd_i4x4 = COST_MAX;
}
}
static void x264_intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh )
{
if( a->i_satd_i16x16 <= i_satd_thresh )
{
h->mb.i_type = I_16x16;
x264_analyse_update_cache( h, a );
a->i_satd_i16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->i_satd_i16x16 = COST_MAX;
if( a->i_satd_i4x4 <= i_satd_thresh && a->i_satd_i4x4 < COST_MAX )
{
h->mb.i_type = I_4x4;
x264_analyse_update_cache( h, a );
a->i_satd_i4x4 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->i_satd_i4x4 = COST_MAX;
if( a->i_satd_i8x8 <= i_satd_thresh && a->i_satd_i8x8 < COST_MAX )
{
h->mb.i_type = I_8x8;
x264_analyse_update_cache( h, a );
a->i_satd_i8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->i_satd_i8x8 = COST_MAX;
}
static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a )
{
uint8_t *p_src = h->mb.pic.p_fenc[0];
uint8_t *p_dst = h->mb.pic.p_fdec[0];
int i, j, idx, x, y;
int i_max, i_mode, i_thresh;
uint64_t i_satd, i_best;
int i_pred_mode;
int predict_mode[9];
h->mb.i_skip_intra = 0;
if( h->mb.i_type == I_16x16 )
{
int old_pred_mode = a->i_predict16x16;
i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8;
i_best = a->i_satd_i16x16;
predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
for( i = 0; i < i_max; i++ )
{
int i_mode = predict_mode[i];
if( i_mode == old_pred_mode || a->i_satd_i16x16_dir[i_mode] > i_thresh )
continue;
h->mb.i_intra16x16_pred_mode = i_mode;
i_satd = x264_rd_cost_mb( h, a->i_lambda2 );
COPY2_IF_LT( i_best, i_satd, a->i_predict16x16, i_mode );
}
}
else if( h->mb.i_type == I_4x4 )
{
uint32_t pels[4] = {0}; // doesn't need initting, just shuts up a gcc warning
int i_nnz = 0;
for( idx = 0; idx < 16; idx++ )
{
uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx];
i_best = COST_MAX64;
i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max );
if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples */
*(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
for( i = 0; i < i_max; i++ )
{
i_mode = predict_mode[i];
if( h->mb.b_lossless )
x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode );
else
h->predict_4x4[i_mode]( p_dst_by );
i_satd = x264_rd_cost_i4x4( h, a->i_lambda2, idx, i_mode );
if( i_best > i_satd )
{
a->i_predict4x4[idx] = i_mode;
i_best = i_satd;
pels[0] = *(uint32_t*)(p_dst_by+0*FDEC_STRIDE);
pels[1] = *(uint32_t*)(p_dst_by+1*FDEC_STRIDE);
pels[2] = *(uint32_t*)(p_dst_by+2*FDEC_STRIDE);
pels[3] = *(uint32_t*)(p_dst_by+3*FDEC_STRIDE);
i_nnz = h->mb.cache.non_zero_count[x264_scan8[idx]];
}
}
*(uint32_t*)(p_dst_by+0*FDEC_STRIDE) = pels[0];
*(uint32_t*)(p_dst_by+1*FDEC_STRIDE) = pels[1];
*(uint32_t*)(p_dst_by+2*FDEC_STRIDE) = pels[2];
*(uint32_t*)(p_dst_by+3*FDEC_STRIDE) = pels[3];
h->mb.cache.non_zero_count[x264_scan8[idx]] = i_nnz;
h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
}
}
else if( h->mb.i_type == I_8x8 )
{
DECLARE_ALIGNED_16( uint8_t edge[33] );
for( idx = 0; idx < 4; idx++ )
{
uint64_t pels_h = 0;
uint8_t pels_v[7];
int i_nnz[3];
uint8_t *p_src_by;
uint8_t *p_dst_by;
int j;
i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8;
i_best = COST_MAX64;
i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
x = idx&1;
y = idx>>1;
p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max );
x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
for( i = 0; i < i_max; i++ )
{
i_mode = predict_mode[i];
if( a->i_satd_i8x8_dir[i_mode][idx] > i_thresh )
continue;
if( h->mb.b_lossless )
x264_predict_lossless_8x8( h, p_dst_by, idx, i_mode, edge );
else
h->predict_8x8[i_mode]( p_dst_by, edge );
i_satd = x264_rd_cost_i8x8( h, a->i_lambda2, idx, i_mode );
if( i_best > i_satd )
{
a->i_predict8x8[idx] = i_mode;
i_best = i_satd;
pels_h = *(uint64_t*)(p_dst_by+7*FDEC_STRIDE);
if( !(idx&1) )
for( j=0; j<7; j++ )
pels_v[j] = p_dst_by[7+j*FDEC_STRIDE];
for( j=0; j<3; j++ )
i_nnz[j] = h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]];
}
}
*(uint64_t*)(p_dst_by+7*FDEC_STRIDE) = pels_h;
if( !(idx&1) )
for( j=0; j<7; j++ )
p_dst_by[7+j*FDEC_STRIDE] = pels_v[j];
for( j=0; j<3; j++ )
h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]] = i_nnz[j];
x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
}
}
/* RD selection for chroma prediction */
predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
if( i_max > 1 )
{
i_thresh = a->i_satd_i8x8chroma * 5/4;
for( i = j = 0; i < i_max; i++ )
if( a->i_satd_i8x8chroma_dir[i] < i_thresh &&
predict_mode[i] != a->i_predict8x8chroma )
{
predict_mode[j++] = predict_mode[i];
}
i_max = j;
if( i_max > 0 )
{
int i_chroma_lambda = x264_lambda2_tab[h->mb.i_chroma_qp];
/* the previous thing encoded was x264_intra_rd(), so the pixels and
* coefs for the current chroma mode are still around, so we only
* have to recount the bits. */
i_best = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 );
for( i = 0; i < i_max; i++ )
{
i_mode = predict_mode[i];
if( h->mb.b_lossless )
x264_predict_lossless_8x8_chroma( h, i_mode );
else
{
h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
}
/* if we've already found a mode that needs no residual, then
* probably any mode with a residual will be worse.
* so avoid dct on the remaining modes to improve speed. */
i_satd = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, i_mode, h->mb.i_cbp_chroma != 0x00 );
COPY2_IF_LT( i_best, i_satd, a->i_predict8x8chroma, i_mode );
}
h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
}
}
}
#define LOAD_FENC( m, src, xoff, yoff) \
(m)->i_stride[0] = h->mb.pic.i_stride[0]; \
(m)->i_stride[1] = h->mb.pic.i_stride[1]; \
(m)->p_fenc[0] = &(src)[0][(xoff)+(yoff)*FENC_STRIDE]; \
(m)->p_fenc[1] = &(src)[1][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE]; \
(m)->p_fenc[2] = &(src)[2][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE];
#define LOAD_HPELS(m, src, list, ref, xoff, yoff) \
(m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[4] = &(src)[4][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
(m)->p_fref[5] = &(src)[5][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
(m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]];
#define REF_COST(list, ref) \
(a->i_lambda * bs_size_te( h->sh.i_num_ref_idx_l##list##_active - 1, ref ))
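/* REF_COST: lambda times the bits needed to code the reference index with a
 * truncated Exp-Golomb (te) code, given the active reference count, as CAVLC
 * codes ref_idx. */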
static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a )
{
x264_me_t m;
int i_ref, i_mvc;
DECLARE_ALIGNED_4( int16_t mvc[8][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
/* 16x16 Search on all ref frame */
m.i_pixel = PIXEL_16x16;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
a->l0.me16x16.cost = INT_MAX;
for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
{
const int i_ref_cost = REF_COST( 0, i_ref );
i_halfpel_thresh -= i_ref_cost;
m.i_ref_cost = i_ref_cost;
m.i_ref = i_ref;
/* search with ref */
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
/* early termination
* SSD threshold would probably be better than SATD */
if( i_ref == 0
&& a->b_try_pskip
&& m.cost-m.cost_mv < 300*a->i_lambda
&& abs(m.mv[0]-h->mb.cache.pskip_mv[0])
+ abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1
&& x264_macroblock_probe_pskip( h ) )
{
h->mb.i_type = P_SKIP;
x264_analyse_update_cache( h, a );
assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
return;
}
m.cost += i_ref_cost;
i_halfpel_thresh += i_ref_cost;
if( m.cost < a->l0.me16x16.cost )
h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) );
/* save mv for predicting neighbors */
*(uint32_t*)a->l0.mvc[i_ref][0] =
*(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
}
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
h->mb.i_type = P_L0;
if( a->i_mbrd )
{
x264_mb_cache_fenc_satd( h );
if( a->l0.me16x16.i_ref == 0 && *(uint32_t*)a->l0.me16x16.mv == *(uint32_t*)h->mb.cache.pskip_mv )
{
h->mb.i_partition = D_16x16;
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
}
}
static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
{
x264_me_t m;
int i_ref;
uint8_t **p_fenc = h->mb.pic.p_fenc;
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = /*h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : */NULL;
int i;
int i_maxref = h->mb.pic.i_fref[0]-1;
h->mb.i_partition = D_8x8;
/* early termination: if 16x16 chose ref 0, then evaluate no refs older
* than those used by the neighbors */
if( i_maxref > 0 && a->l0.me16x16.i_ref == 0 &&
h->mb.i_mb_type_top && h->mb.i_mb_type_left )
{
i_maxref = 0;
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 - 1 ] );
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 0 ] );
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 2 ] );
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 4 ] );
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 0 - 1 ] );
i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 2*8 - 1 ] );
}
for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
*(uint32_t*)a->l0.mvc[i_ref][0] = *(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy];
for( i = 0; i < 4; i++ )
{
x264_me_t *l0m = &a->l0.me8x8[i];
const int x8 = i%2;
const int y8 = i/2;
m.i_pixel = PIXEL_8x8;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 );
l0m->cost = INT_MAX;
for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
{
const int i_ref_cost = REF_COST( 0, i_ref );
i_halfpel_thresh -= i_ref_cost;
m.i_ref_cost = i_ref_cost;
m.i_ref = i_ref;
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh );
m.cost += i_ref_cost;
i_halfpel_thresh += i_ref_cost;
*(uint32_t*)a->l0.mvc[i_ref][i+1] = *(uint32_t*)m.mv;
if( m.cost < l0m->cost )
h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
}
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv );
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );
/* mb type cost */
l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;
h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
}
static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a )
{
const int i_ref = a->l0.me16x16.i_ref;
const int i_ref_cost = REF_COST( 0, i_ref );
uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref];
uint8_t **p_fenc = h->mb.pic.p_fenc;
int i_mvc;
int16_t (*mvc)[2] = a->l0.mvc[i_ref];
int i;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x8;
i_mvc = 1;
*(uint32_t*)mvc[0] = *(uint32_t*)a->l0.me16x16.mv;
for( i = 0; i < 4; i++ )
{
x264_me_t *m = &a->l0.me8x8[i];
const int x8 = i%2;
const int y8 = i/2;
m->i_pixel = PIXEL_8x8;
m->p_cost_mv = a->p_cost_mv;
m->i_ref_cost = i_ref_cost;
m->i_ref = i_ref;
LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
LOAD_HPELS( m, p_fref, 0, i_ref, 8*x8, 8*y8 );
x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp );
x264_me_search( h, m, mvc, i_mvc );
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, m->mv );
*(uint32_t*)mvc[i_mvc] = *(uint32_t*)m->mv;
i_mvc++;
/* mb type cost */
m->cost += i_ref_cost;
m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
/* theoretically this should include 4*ref_cost,
* but 3 seems a better approximation of cabac. */
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
a->l0.me8x8[2].cost + a->l0.me8x8[3].cost -
REF_COST( 0, a->l0.me16x16.i_ref );
h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
}
static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a )
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
DECLARE_ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_16x8;
for( i = 0; i < 2; i++ )
{
x264_me_t *l0m = &a->l0.me16x8[i];
const int ref8[2] = { a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref };
const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
m.i_pixel = PIXEL_16x8;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 0, 8*i );
l0m->cost = INT_MAX;
for( j = 0; j < i_ref8s; j++ )
{
const int i_ref = ref8[j];
const int i_ref_cost = REF_COST( 0, i_ref );
m.i_ref_cost = i_ref_cost;
m.i_ref = i_ref;
/* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
*(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0];
*(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][2*i+1];
*(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][2*i+2];
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i );
x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref );
x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp );
x264_me_search( h, &m, mvc, 3 );
m.cost += i_ref_cost;
if( m.cost < l0m->cost )
h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
}
x264_macroblock_cache_mv_ptr( h, 0, 2*i, 4, 2, 0, l0m->mv );
x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref );
}
a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost;
}
static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a )
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
DECLARE_ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x16;
for( i = 0; i < 2; i++ )
{
x264_me_t *l0m = &a->l0.me8x16[i];
const int ref8[2] = { a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref };
const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
m.i_pixel = PIXEL_8x16;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 8*i, 0 );
l0m->cost = INT_MAX;
for( j = 0; j < i_ref8s; j++ )
{
const int i_ref = ref8[j];
const int i_ref_cost = REF_COST( 0, i_ref );
m.i_ref_cost = i_ref_cost;
m.i_ref = i_ref;
*(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0];
*(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][i+1];
*(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][i+3];
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 );
x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
x264_me_search( h, &m, mvc, 3 );
m.cost += i_ref_cost;
if( m.cost < l0m->cost )
h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
}
x264_macroblock_cache_mv_ptr( h, 2*i, 0, 2, 4, 0, l0m->mv );
x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref );
}
a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost;
}
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
DECLARE_ALIGNED_8( uint8_t pix1[16*8] );
uint8_t *pix2 = pix1+8;
const int i_stride = h->mb.pic.i_stride[1];
const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE;
#define CHROMA4x4MC( width, height, me, x, y ) \
h->mc.mc_chroma( &pix1[x+y*16], 16, &p_fref[4][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height ); \
h->mc.mc_chroma( &pix2[x+y*16], 16, &p_fref[5][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height );
if( pixel == PIXEL_4x4 )
{
CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][0], 0,0 );
CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][1], 2,0 );
CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][2], 0,2 );
CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][3], 2,2 );
}
else if( pixel == PIXEL_8x4 )
{
CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][0], 0,0 );
CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][1], 0,2 );
}
else
{
CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][0], 0,0 );
CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][1], 2,0 );
}
return h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][oe], FENC_STRIDE, pix1, 16 )
+ h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[2][oe], FENC_STRIDE, pix2, 16 );
}
static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
uint8_t **p_fenc = h->mb.pic.p_fenc;
const int i_ref = a->l0.me8x8[i8x8].i_ref;
int i4x4;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x8;
for( i4x4 = 0; i4x4 < 4; i4x4++ )
{
const int idx = 4*i8x8 + i4x4;
const int x4 = block_idx_x[idx];
const int y4 = block_idx_y[idx];
const int i_mvc = (i4x4 == 0);
x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];
m->i_pixel = PIXEL_4x4;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );
x264_macroblock_cache_mv_ptr( h, x4, y4, 1, 1, 0, m->mv );
}
a->l0.i_cost4x4[i8x8] = a->l0.me4x4[i8x8][0].cost +
a->l0.me4x4[i8x8][1].cost +
a->l0.me4x4[i8x8][2].cost +
a->l0.me4x4[i8x8][3].cost +
REF_COST( 0, i_ref ) +
a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x4];
if( h->mb.b_chroma_me )
a->l0.i_cost4x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x4 );
}
static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
uint8_t **p_fenc = h->mb.pic.p_fenc;
const int i_ref = a->l0.me8x8[i8x8].i_ref;
int i8x4;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x8;
for( i8x4 = 0; i8x4 < 2; i8x4++ )
{
const int idx = 4*i8x8 + 2*i8x4;
const int x4 = block_idx_x[idx];
const int y4 = block_idx_y[idx];
const int i_mvc = (i8x4 == 0);
x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];
m->i_pixel = PIXEL_8x4;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
x264_macroblock_cache_mv_ptr( h, x4, y4, 2, 1, 0, m->mv );
}
a->l0.i_cost8x4[i8x8] = a->l0.me8x4[i8x8][0].cost + a->l0.me8x4[i8x8][1].cost +
REF_COST( 0, i_ref ) +
a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x4];
if( h->mb.b_chroma_me )
a->l0.i_cost8x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_8x4 );
}
static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
uint8_t **p_fenc = h->mb.pic.p_fenc;
const int i_ref = a->l0.me8x8[i8x8].i_ref;
int i4x8;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x8;
for( i4x8 = 0; i4x8 < 2; i4x8++ )
{
const int idx = 4*i8x8 + i4x8;
const int x4 = block_idx_x[idx];
const int y4 = block_idx_y[idx];
const int i_mvc = (i4x8 == 0);
x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];
m->i_pixel = PIXEL_4x8;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
x264_macroblock_cache_mv_ptr( h, x4, y4, 1, 2, 0, m->mv );
}
a->l0.i_cost4x8[i8x8] = a->l0.me4x8[i8x8][0].cost + a->l0.me4x8[i8x8][1].cost +
REF_COST( 0, i_ref ) +
a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x8];
if( h->mb.b_chroma_me )
a->l0.i_cost4x8[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 );
}
static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a )
{
/* Assumes that fdec still contains the results of
* x264_mb_predict_mv_direct16x16 and x264_mb_mc */
uint8_t **p_fenc = h->mb.pic.p_fenc;
uint8_t **p_fdec = h->mb.pic.p_fdec;
int i;
a->i_cost16x16direct = a->i_lambda * i_mb_b_cost_table[B_DIRECT];
for( i = 0; i < 4; i++ )
{
const int x = (i&1)*8;
const int y = (i>>1)*8;
a->i_cost16x16direct +=
a->i_cost8x8direct[i] =
h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[0][x+y*FENC_STRIDE], FENC_STRIDE, &p_fdec[0][x+y*FDEC_STRIDE], FDEC_STRIDE );
/* mb type cost */
a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8];
}
}
#define WEIGHTED_AVG( size, pix, stride, src1, stride1, src2, stride2 ) \
{ \
h->mc.avg[size]( pix, stride, src1, stride1, src2, stride2, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \
}
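/* bipred: weighted average of the list0 and list1 predictions, with the
 * weight taken from the per-(l0 ref, l1 ref) entry of bipred_weight. */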
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
DECLARE_ALIGNED_16( uint8_t pix0[16*16] );
DECLARE_ALIGNED_16( uint8_t pix1[16*16] );
uint8_t *src0, *src1;
int stride0 = 16, stride1 = 16;
x264_me_t m;
int i_ref, i_mvc;
DECLARE_ALIGNED_4( int16_t mvc[9][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
/* 16x16 search over all reference frames */
m.i_pixel = PIXEL_16x16;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
/* ME for List 0 */
a->l0.me16x16.cost = INT_MAX;
for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
{
/* search with ref */
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
/* add ref cost */
m.cost += REF_COST( 0, i_ref );
if( m.cost < a->l0.me16x16.cost )
{
a->l0.i_ref = i_ref;
h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) );
}
/* save mv for predicting neighbors */
*(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
}
/* subtract ref cost, so we don't have to add it for the other MB types */
a->l0.me16x16.cost -= REF_COST( 0, a->l0.i_ref );
/* ME for list 1 */
i_halfpel_thresh = INT_MAX;
p_halfpel_thresh = h->mb.pic.i_fref[1]>1 ? &i_halfpel_thresh : NULL;
a->l1.me16x16.cost = INT_MAX;
for( i_ref = 0; i_ref < h->mb.pic.i_fref[1]; i_ref++ )
{
/* search with ref */
LOAD_HPELS( &m, h->mb.pic.p_fref[1][i_ref], 1, i_ref, 0, 0 );
x264_mb_predict_mv_16x16( h, 1, i_ref, m.mvp );
x264_mb_predict_mv_ref16x16( h, 1, i_ref, mvc, &i_mvc );
x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
/* add ref cost */
m.cost += REF_COST( 1, i_ref );
if( m.cost < a->l1.me16x16.cost )
{
a->l1.i_ref = i_ref;
h->mc.memcpy_aligned( &a->l1.me16x16, &m, sizeof(x264_me_t) );
}
/* save mv for predicting neighbors */
*(uint32_t*)h->mb.mvr[1][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
}
/* subtract ref cost, so we don't have to add it for the other MB types */
a->l1.me16x16.cost -= REF_COST( 1, a->l1.i_ref );
/* Set global ref, needed for other modes? */
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
/* get cost of BI mode */
src0 = h->mc.get_ref( pix0, &stride0,
h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], 16, 16 );
src1 = h->mc.get_ref( pix1, &stride1,
h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], 16, 16 );
h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] );
a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 )
+ REF_COST( 0, a->l0.i_ref )
+ REF_COST( 1, a->l1.i_ref )
+ a->l0.me16x16.cost_mv
+ a->l1.me16x16.cost_mv;
/* mb type cost */
a->i_cost16x16bi += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
a->l0.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L0_L0];
a->l1.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L1_L1];
}
static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
{
const int x = 2*(i%2);
const int y = 2*(i/2);
switch( h->mb.i_sub_partition[i] )
{
case D_L0_8x8:
x264_macroblock_cache_mv_ptr( h, x, y, 2, 2, 0, a->l0.me8x8[i].mv );
break;
case D_L0_8x4:
x264_macroblock_cache_mv_ptr( h, x, y+0, 2, 1, 0, a->l0.me8x4[i][0].mv );
x264_macroblock_cache_mv_ptr( h, x, y+1, 2, 1, 0, a->l0.me8x4[i][1].mv );
break;
case D_L0_4x8:
x264_macroblock_cache_mv_ptr( h, x+0, y, 1, 2, 0, a->l0.me4x8[i][0].mv );
x264_macroblock_cache_mv_ptr( h, x+1, y, 1, 2, 0, a->l0.me4x8[i][1].mv );
break;
case D_L0_4x4:
x264_macroblock_cache_mv_ptr( h, x+0, y+0, 1, 1, 0, a->l0.me4x4[i][0].mv );
x264_macroblock_cache_mv_ptr( h, x+1, y+0, 1, 1, 0, a->l0.me4x4[i][1].mv );
x264_macroblock_cache_mv_ptr( h, x+0, y+1, 1, 1, 0, a->l0.me4x4[i][2].mv );
x264_macroblock_cache_mv_ptr( h, x+1, y+1, 1, 1, 0, a->l0.me4x4[i][3].mv );
break;
default:
x264_log( h, X264_LOG_ERROR, "internal error\n" );
break;
}
}
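/* Cache refs/MVs for one B partition: for each list, store the ME result
 * if the partition uses that list, otherwise mark the list unused
 * (ref -1, zero MV, and zero MVD when b_mvd is set). */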
#define CACHE_MV_BI(x,y,dx,dy,me0,me1,part) \
if( x264_mb_partition_listX_table[0][part] ) \
{ \
x264_macroblock_cache_ref( h, x,y,dx,dy, 0, a->l0.i_ref ); \
x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 0, me0.mv ); \
} \
else \
{ \
x264_macroblock_cache_ref( h, x,y,dx,dy, 0, -1 ); \
x264_macroblock_cache_mv( h, x,y,dx,dy, 0, 0 ); \
if( b_mvd ) \
x264_macroblock_cache_mvd( h, x,y,dx,dy, 0, 0 ); \
} \
if( x264_mb_partition_listX_table[1][part] ) \
{ \
x264_macroblock_cache_ref( h, x,y,dx,dy, 1, a->l1.i_ref ); \
x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 1, me1.mv ); \
} \
else \
{ \
x264_macroblock_cache_ref( h, x,y,dx,dy, 1, -1 ); \
x264_macroblock_cache_mv( h, x,y,dx,dy, 1, 0 ); \
if( b_mvd ) \
x264_macroblock_cache_mvd( h, x,y,dx,dy, 1, 0 ); \
}
static inline void x264_mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
int x = (i%2)*2;
int y = (i/2)*2;
if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
{
x264_mb_load_mv_direct8x8( h, i );
if( b_mvd )
{
x264_macroblock_cache_mvd( h, x, y, 2, 2, 0, 0 );
x264_macroblock_cache_mvd( h, x, y, 2, 2, 1, 0 );
x264_macroblock_cache_skip( h, x, y, 2, 2, 1 );
}
}
else
{
CACHE_MV_BI( x, y, 2, 2, a->l0.me8x8[i], a->l1.me8x8[i], h->mb.i_sub_partition[i] );
}
}
static inline void x264_mb_cache_mv_b16x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
CACHE_MV_BI( 0, 2*i, 4, 2, a->l0.me16x8[i], a->l1.me16x8[i], a->i_mb_partition16x8[i] );
}
static inline void x264_mb_cache_mv_b8x16( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
CACHE_MV_BI( 2*i, 0, 2, 4, a->l0.me8x16[i], a->l1.me8x16[i], a->i_mb_partition8x16[i] );
}
#undef CACHE_MV_BI
static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a )
{
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
DECLARE_ALIGNED_8( uint8_t pix[2][8*8] );
int i, l;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x8;
a->i_cost8x8bi = 0;
for( i = 0; i < 4; i++ )
{
const int x8 = i%2;
const int y8 = i/2;
int i_part_cost;
int i_part_cost_bi = 0;
int stride[2] = {8,8};
uint8_t *src[2];
for( l = 0; l < 2; l++ )
{
x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
x264_me_t *m = &lX->me8x8[i];
m->i_pixel = PIXEL_8x8;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*x8, 8*y8 );
x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
x264_me_search( h, m, &lX->me16x16.mv, 1 );
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, l, m->mv );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
m->mv[0], m->mv[1], 8, 8 );
i_part_cost_bi += m->cost_mv;
/* FIXME: ref cost */
}
h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] );
i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 )
+ a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8];
a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];
i_part_cost = a->l0.me8x8[i].cost;
h->mb.i_sub_partition[i] = D_L0_8x8;
COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 );
COPY2_IF_LT( i_part_cost, i_part_cost_bi, h->mb.i_sub_partition[i], D_BI_8x8 );
COPY2_IF_LT( i_part_cost, a->i_cost8x8direct[i], h->mb.i_sub_partition[i], D_DIRECT_8x8 );
a->i_cost8x8bi += i_part_cost;
/* XXX Needed for x264_mb_predict_mv */
x264_mb_cache_mv_b8x8( h, a, i, 0 );
}
/* mb type cost */
a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8];
}
static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a )
{
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
DECLARE_ALIGNED_16( uint8_t pix[2][16*8] );
DECLARE_ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_16x8;
a->i_cost16x8bi = 0;
for( i = 0; i < 2; i++ )
{
int i_part_cost;
int i_part_cost_bi = 0;
int stride[2] = {16,16};
uint8_t *src[2];
/* TODO: check only the list(s) that were used in b8x8? */
for( l = 0; l < 2; l++ )
{
x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
x264_me_t *m = &lX->me16x8[i];
m->i_pixel = PIXEL_16x8;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 0, 8*i );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 0, 8*i );
*(uint32_t*)mvc[0] = *(uint32_t*)lX->me8x8[2*i].mv;
*(uint32_t*)mvc[1] = *(uint32_t*)lX->me8x8[2*i+1].mv;
x264_mb_predict_mv( h, l, 8*i, 2, m->mvp );
x264_me_search( h, m, mvc, 2 );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
m->mv[0], m->mv[1], 16, 8 );
/* FIXME: ref cost */
i_part_cost_bi += m->cost_mv;
}
h->mc.avg[PIXEL_16x8]( pix[0], 16, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] );
i_part_cost_bi += h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 );
i_part_cost = a->l0.me16x8[i].cost;
a->i_mb_partition16x8[i] = D_L0_8x8; /* not actually an 8x8 partition; only the list (L0) matters */
if( a->l1.me16x8[i].cost < i_part_cost )
{
i_part_cost = a->l1.me16x8[i].cost;
a->i_mb_partition16x8[i] = D_L1_8x8;
}
if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
{
i_part_cost = i_part_cost_bi;
a->i_mb_partition16x8[i] = D_BI_8x8;
}
a->i_cost16x8bi += i_part_cost;
x264_mb_cache_mv_b16x8( h, a, i, 0 );
}
/* mb type cost */
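/* each half's list usage (>>2 maps D_L0/L1/BI_8x8 to 0/1/2) indexes the
 * 3x3 {L0,L1,BI}x{L0,L1,BI} type table starting at B_L0_L0 */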
a->i_mb_type16x8 = B_L0_L0
+ (a->i_mb_partition16x8[0]>>2) * 3
+ (a->i_mb_partition16x8[1]>>2);
a->i_cost16x8bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type16x8];
}
static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a )
{
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
DECLARE_ALIGNED_8( uint8_t pix[2][8*16] );
DECLARE_ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_8x16;
a->i_cost8x16bi = 0;
for( i = 0; i < 2; i++ )
{
int i_part_cost;
int i_part_cost_bi = 0;
int stride[2] = {8,8};
uint8_t *src[2];
for( l = 0; l < 2; l++ )
{
x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
x264_me_t *m = &lX->me8x16[i];
m->i_pixel = PIXEL_8x16;
m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 8*i, 0 );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*i, 0 );
*(uint32_t*)mvc[0] = *(uint32_t*)lX->me8x8[i].mv;
*(uint32_t*)mvc[1] = *(uint32_t*)lX->me8x8[i+2].mv;
x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
x264_me_search( h, m, mvc, 2 );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
m->mv[0], m->mv[1], 8, 16 );
/* FIXME: ref cost */
i_part_cost_bi += m->cost_mv;
}
h->mc.avg[PIXEL_8x16]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] );
i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], FENC_STRIDE, pix[0], 8 );
i_part_cost = a->l0.me8x16[i].cost;
a->i_mb_partition8x16[i] = D_L0_8x8;
if( a->l1.me8x16[i].cost < i_part_cost )
{
i_part_cost = a->l1.me8x16[i].cost;
a->i_mb_partition8x16[i] = D_L1_8x8;
}
if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
{
i_part_cost = i_part_cost_bi;
a->i_mb_partition8x16[i] = D_BI_8x8;
}
a->i_cost8x16bi += i_part_cost;
x264_mb_cache_mv_b8x16( h, a, i, 0 );
}
/* mb type cost */
a->i_mb_type8x16 = B_L0_L0
+ (a->i_mb_partition8x16[0]>>2) * 3
+ (a->i_mb_partition8x16[1]>>2);
a->i_cost8x16bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type8x16];
}
static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd )
{
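/* Replace SATD costs with exact RD costs, but only for partitions within
 * 25% of the best inter SATD (16x16 gets a looser 50% margin); anything
 * above the threshold keeps COST_MAX. */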
int thresh = i_satd * 5/4;
h->mb.i_type = P_L0;
if( a->l0.i_rd16x16 == COST_MAX && a->l0.me16x16.cost <= i_satd * 3/2 )
{
h->mb.i_partition = D_16x16;
x264_analyse_update_cache( h, a );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
a->l0.me16x16.cost = a->l0.i_rd16x16;
if( a->l0.i_cost16x8 <= thresh )
{
h->mb.i_partition = D_16x8;
x264_analyse_update_cache( h, a );
a->l0.i_cost16x8 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->l0.i_cost16x8 = COST_MAX;
if( a->l0.i_cost8x16 <= thresh )
{
h->mb.i_partition = D_8x16;
x264_analyse_update_cache( h, a );
a->l0.i_cost8x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->l0.i_cost8x16 = COST_MAX;
if( a->l0.i_cost8x8 <= thresh )
{
h->mb.i_type = P_8x8;
h->mb.i_partition = D_8x8;
if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 )
{
int i;
x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref );
x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref );
x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref );
x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref );
for( i = 0; i < 4; i++ )
{
int costs[4] = {a->l0.i_cost4x4[i], a->l0.i_cost8x4[i], a->l0.i_cost4x8[i], a->l0.me8x8[i].cost};
int thresh = X264_MIN4( costs[0], costs[1], costs[2], costs[3] ) * 5 / 4;
int subtype, btype = D_L0_8x8;
uint64_t bcost = COST_MAX64;
for( subtype = D_L0_4x4; subtype <= D_L0_8x8; subtype++ )
{
uint64_t cost;
if( costs[subtype] > thresh || (subtype == D_L0_8x8 && bcost == COST_MAX64) )
continue;
h->mb.i_sub_partition[i] = subtype;
x264_mb_cache_mv_p8x8( h, a, i );
cost = x264_rd_cost_part( h, a->i_lambda2, i<<2, PIXEL_8x8 );
COPY2_IF_LT( bcost, cost, btype, subtype );
}
h->mb.i_sub_partition[i] = btype;
x264_mb_cache_mv_p8x8( h, a, i );
}
}
else
x264_analyse_update_cache( h, a );
a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
}
else
a->l0.i_cost8x8 = COST_MAX;
}
static void x264_mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
{
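/* Same idea for B MBs: RD-evaluate each candidate mode whose SATD is
 * within 1/16 of the best inter SATD (the margin doubles to 2/16 when
 * psy-RD is on). */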
int thresh = i_satd_inter * (17 + (!!h->mb.i_psy_rd))/16;
if( a->b_direct_available && a->i_rd16x16direct == COST_MAX )
{
h->mb.i_type = B_DIRECT;
/* Assumes direct/skip MC is still in fdec */
/* Requires b-rdo to be done before intra analysis */
h->mb.b_skip_mc = 1;
x264_analyse_update_cache( h, a );
a->i_rd16x16direct = x264_rd_cost_mb( h, a->i_lambda2 );
h->mb.b_skip_mc = 0;
}
//FIXME not all the update_cache calls are needed
h->mb.i_partition = D_16x16;
/* L0 */
if( a->l0.me16x16.cost <= thresh && a->l0.i_rd16x16 == COST_MAX )
{
h->mb.i_type = B_L0_L0;
x264_analyse_update_cache( h, a );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
/* L1 */
if( a->l1.me16x16.cost <= thresh && a->l1.i_rd16x16 == COST_MAX )
{
h->mb.i_type = B_L1_L1;
x264_analyse_update_cache( h, a );
a->l1.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
/* BI */
if( a->i_cost16x16bi <= thresh && a->i_rd16x16bi == COST_MAX )
{
h->mb.i_type = B_BI_BI;
x264_analyse_update_cache( h, a );
a->i_rd16x16bi = x264_rd_cost_mb( h, a->i_lambda2 );
}
/* 8x8 */
if( a->i_cost8x8bi <= thresh && a->i_rd8x8bi == COST_MAX )
{
h->mb.i_type = B_8x8;
h->mb.i_partition = D_8x8;
x264_analyse_update_cache( h, a );
a->i_rd8x8bi = x264_rd_cost_mb( h, a->i_lambda2 );
x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
}
/* 16x8 */
if( a->i_cost16x8bi <= thresh && a->i_rd16x8bi == COST_MAX )
{
h->mb.i_type = a->i_mb_type16x8;
h->mb.i_partition = D_16x8;
x264_analyse_update_cache( h, a );
a->i_rd16x8bi = x264_rd_cost_mb( h, a->i_lambda2 );
}
/* 8x16 */
if( a->i_cost8x16bi <= thresh && a->i_rd8x16bi == COST_MAX )
{
h->mb.i_type = a->i_mb_type8x16;
h->mb.i_partition = D_8x16;
x264_analyse_update_cache( h, a );
a->i_rd8x16bi = x264_rd_cost_mb( h, a->i_lambda2 );
}
}
static void x264_refine_bidir( x264_t *h, x264_mb_analysis_t *a )
{
const int i_biweight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref];
int i;
if( IS_INTRA(h->mb.i_type) )
return;
switch( h->mb.i_partition )
{
case D_16x16:
if( h->mb.i_type == B_BI_BI )
x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
break;
case D_16x8:
for( i=0; i<2; i++ )
if( a->i_mb_partition16x8[i] == D_BI_8x8 )
x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
break;
case D_8x16:
for( i=0; i<2; i++ )
if( a->i_mb_partition8x16[i] == D_BI_8x8 )
x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
break;
case D_8x8:
for( i=0; i<4; i++ )
if( h->mb.i_sub_partition[i] == D_BI_8x8 )
x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
break;
}
}
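/* Pick the inter transform size by comparing SA8D (a proxy for the 8x8
 * transform) against SATD (a proxy for 4x4) on the motion-compensated luma. */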
static inline void x264_mb_analyse_transform( x264_t *h )
{
if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 && !h->mb.b_lossless )
{
int i_cost4, i_cost8;
/* Only luma MC is really needed, but the full MC is re-used in macroblock_encode. */
x264_mb_mc( h );
i_cost8 = h->pixf.sa8d[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE,
h->mb.pic.p_fdec[0], FDEC_STRIDE );
i_cost4 = h->pixf.satd[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE,
h->mb.pic.p_fdec[0], FDEC_STRIDE );
h->mb.b_transform_8x8 = i_cost8 < i_cost4;
h->mb.b_skip_mc = 1;
}
}
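/* RD version: re-cost the MB with the opposite transform size and keep
 * the cheaper one, rescaling the SATD estimate proportionally so later
 * comparisons against intra stay consistent. */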
static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t *a, int *i_satd, int *i_rd )
{
if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 )
{
int i_rd8;
x264_analyse_update_cache( h, a );
h->mb.b_transform_8x8 = !h->mb.b_transform_8x8;
/* FIXME only luma is needed, but the score for comparison already includes chroma */
i_rd8 = x264_rd_cost_mb( h, a->i_lambda2 );
if( *i_rd >= i_rd8 )
{
if( *i_rd > 0 )
*i_satd = (int64_t)(*i_satd) * i_rd8 / *i_rd;
/* prevent a rare division by zero in estimated intra cost */
if( *i_satd == 0 )
*i_satd = 1;
*i_rd = i_rd8;
}
else
h->mb.b_transform_8x8 = !h->mb.b_transform_8x8;
}
}
/*****************************************************************************
* x264_macroblock_analyse:
*****************************************************************************/
void x264_macroblock_analyse( x264_t *h )
{
x264_mb_analysis_t analysis;
int i_cost = COST_MAX;
int i;
h->mb.i_qp = x264_ratecontrol_qp( h );
if( h->param.rc.i_aq_mode )
x264_adaptive_quant( h );
x264_mb_analyse_init( h, &analysis, h->mb.i_qp );
/*--------------------------- Do the analysis ---------------------------*/
if( h->sh.i_type == SLICE_TYPE_I )
{
if( analysis.i_mbrd )
x264_mb_cache_fenc_satd( h );
x264_mb_analyse_intra( h, &analysis, COST_MAX );
if( analysis.i_mbrd )
x264_intra_rd( h, &analysis, COST_MAX );
i_cost = analysis.i_satd_i16x16;
h->mb.i_type = I_16x16;
COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, h->mb.i_type, I_4x4 );
COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, h->mb.i_type, I_8x8 );
if( analysis.i_satd_pcm < i_cost )
h->mb.i_type = I_PCM;
else if( analysis.i_mbrd >= 2 )
x264_intra_rd_refine( h, &analysis );
}
else if( h->sh.i_type == SLICE_TYPE_P )
{
int b_skip = 0;
int i_intra_cost, i_intra_type;
h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 );
/* Fast P_SKIP detection */
analysis.b_try_pskip = 0;
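/* cheap early-out: when subpel refine is high, defer the skip probe to
 * the 16x16 search (b_try_pskip); otherwise probe right away, but only
 * if a neighboring MB was itself skipped */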
if( h->param.analyse.b_fast_pskip )
{
if( h->param.i_threads > 1 && h->mb.cache.pskip_mv[1] > h->mb.mv_max_spel[1] )
// FIXME don't need to check this if the reference frame is done
{}
else if( h->param.analyse.i_subpel_refine >= 3 )
analysis.b_try_pskip = 1;
else if( h->mb.i_mb_type_left == P_SKIP ||
h->mb.i_mb_type_top == P_SKIP ||
h->mb.i_mb_type_topleft == P_SKIP ||
h->mb.i_mb_type_topright == P_SKIP )
b_skip = x264_macroblock_probe_pskip( h );
}
h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 1 );
if( b_skip )
{
h->mb.i_type = P_SKIP;
h->mb.i_partition = D_16x16;
assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
}
else
{
const unsigned int flags = h->param.analyse.inter;
int i_type;
int i_partition;
int i_thresh16x8;
int i_satd_inter, i_satd_intra;
x264_mb_analyse_load_costs( h, &analysis );
x264_mb_analyse_inter_p16x16( h, &analysis );
if( h->mb.i_type == P_SKIP )
return;
if( flags & X264_ANALYSE_PSUB16x16 )
{
if( h->param.analyse.b_mixed_references )
x264_mb_analyse_inter_p8x8_mixed_ref( h, &analysis );
else
x264_mb_analyse_inter_p8x8( h, &analysis );
}
/* Select best inter mode */
i_type = P_L0;
i_partition = D_16x16;
i_cost = analysis.l0.me16x16.cost;
if( ( flags & X264_ANALYSE_PSUB16x16 ) &&
analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost )
{
i_type = P_8x8;
i_partition = D_8x8;
i_cost = analysis.l0.i_cost8x8;
/* Do sub 8x8 */
if( flags & X264_ANALYSE_PSUB8x8 )
{
for( i = 0; i < 4; i++ )
{
x264_mb_analyse_inter_p4x4( h, &analysis, i );
if( analysis.l0.i_cost4x4[i] < analysis.l0.me8x8[i].cost )
{
int i_cost8x8 = analysis.l0.i_cost4x4[i];
h->mb.i_sub_partition[i] = D_L0_4x4;
x264_mb_analyse_inter_p8x4( h, &analysis, i );
COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost8x4[i],
h->mb.i_sub_partition[i], D_L0_8x4 );
x264_mb_analyse_inter_p4x8( h, &analysis, i );
COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost4x8[i],
h->mb.i_sub_partition[i], D_L0_4x8 );
i_cost += i_cost8x8 - analysis.l0.me8x8[i].cost;
}
x264_mb_cache_mv_p8x8( h, &analysis, i );
}
analysis.l0.i_cost8x8 = i_cost;
}
}
/* Now do 16x8/8x16 */
i_thresh16x8 = analysis.l0.me8x8[1].cost_mv + analysis.l0.me8x8[2].cost_mv;
if( ( flags & X264_ANALYSE_PSUB16x16 ) &&
analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost + i_thresh16x8 )
{
x264_mb_analyse_inter_p16x8( h, &analysis );
COPY3_IF_LT( i_cost, analysis.l0.i_cost16x8, i_type, P_L0, i_partition, D_16x8 );
x264_mb_analyse_inter_p8x16( h, &analysis );
COPY3_IF_LT( i_cost, analysis.l0.i_cost8x16, i_type, P_L0, i_partition, D_8x16 );
}
h->mb.i_partition = i_partition;
/* refine qpel */
//FIXME mb_type costs?
if( analysis.i_mbrd )
{
/* refine later */
}
else if( i_partition == D_16x16 )
{
x264_me_refine_qpel( h, &analysis.l0.me16x16 );
i_cost = analysis.l0.me16x16.cost;
}
else if( i_partition == D_16x8 )
{
x264_me_refine_qpel( h, &analysis.l0.me16x8[0] );
x264_me_refine_qpel( h, &analysis.l0.me16x8[1] );
i_cost = analysis.l0.me16x8[0].cost + analysis.l0.me16x8[1].cost;
}
else if( i_partition == D_8x16 )
{
x264_me_refine_qpel( h, &analysis.l0.me8x16[0] );
x264_me_refine_qpel( h, &analysis.l0.me8x16[1] );
i_cost = analysis.l0.me8x16[0].cost + analysis.l0.me8x16[1].cost;
}
else if( i_partition == D_8x8 )
{
int i8x8;
i_cost = 0;
for( i8x8 = 0; i8x8 < 4; i8x8++ )
{
switch( h->mb.i_sub_partition[i8x8] )
{
case D_L0_8x8:
x264_me_refine_qpel( h, &analysis.l0.me8x8[i8x8] );
i_cost += analysis.l0.me8x8[i8x8].cost;
break;
case D_L0_8x4:
x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][0] );
x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][1] );
i_cost += analysis.l0.me8x4[i8x8][0].cost +
analysis.l0.me8x4[i8x8][1].cost;
break;
case D_L0_4x8:
x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][0] );
x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][1] );
i_cost += analysis.l0.me4x8[i8x8][0].cost +
analysis.l0.me4x8[i8x8][1].cost;
break;
case D_L0_4x4:
x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][0] );
x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][1] );
x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][2] );
x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][3] );
i_cost += analysis.l0.me4x4[i8x8][0].cost +
analysis.l0.me4x4[i8x8][1].cost +
analysis.l0.me4x4[i8x8][2].cost +
analysis.l0.me4x4[i8x8][3].cost;
break;
default:
x264_log( h, X264_LOG_ERROR, "internal error (!8x8 && !4x4)\n" );
break;
}
}
}
if( h->mb.b_chroma_me )
{
x264_mb_analyse_intra_chroma( h, &analysis );
x264_mb_analyse_intra( h, &analysis, i_cost - analysis.i_satd_i8x8chroma );
analysis.i_satd_i16x16 += analysis.i_satd_i8x8chroma;
analysis.i_satd_i8x8 += analysis.i_satd_i8x8chroma;
analysis.i_satd_i4x4 += analysis.i_satd_i8x8chroma;
}
else
x264_mb_analyse_intra( h, &analysis, i_cost );
i_satd_inter = i_cost;
i_satd_intra = X264_MIN3( analysis.i_satd_i16x16,
analysis.i_satd_i8x8,
analysis.i_satd_i4x4 );
if( analysis.i_mbrd )
{
x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
i_type = P_L0;
i_partition = D_16x16;
i_cost = analysis.l0.me16x16.cost;
COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
h->mb.i_type = i_type;
h->mb.i_partition = i_partition;
if( i_cost < COST_MAX )
x264_mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
x264_intra_rd( h, &analysis, i_satd_inter * 5/4 );
}
i_intra_type = I_16x16;
i_intra_cost = analysis.i_satd_i16x16;
COPY2_IF_LT( i_intra_cost, analysis.i_satd_i8x8, i_intra_type, I_8x8 );
COPY2_IF_LT( i_intra_cost, analysis.i_satd_i4x4, i_intra_type, I_4x4 );
COPY2_IF_LT( i_intra_cost, analysis.i_satd_pcm, i_intra_type, I_PCM );
COPY2_IF_LT( i_cost, i_intra_cost, i_type, i_intra_type );
if( i_intra_cost == COST_MAX )
i_intra_cost = i_cost * i_satd_intra / i_satd_inter + 1;
h->mb.i_type = i_type;
h->stat.frame.i_intra_cost += i_intra_cost;
h->stat.frame.i_inter_cost += i_cost;
h->stat.frame.i_mbs_analysed++;
if( analysis.i_mbrd >= 2 && h->mb.i_type != I_PCM )
{
if( IS_INTRA( h->mb.i_type ) )
{
x264_intra_rd_refine( h, &analysis );
}
else if( i_partition == D_16x16 )
{
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
}
else if( i_partition == D_16x8 )
{
h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, analysis.l0.me16x8[0].i_ref );
x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, analysis.l0.me16x8[1].i_ref );
x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 8, 0 );
}
else if( i_partition == D_8x16 )
{
h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, analysis.l0.me8x16[0].i_ref );
x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, analysis.l0.me8x16[1].i_ref );
x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 4, 0 );
}
else if( i_partition == D_8x8 )
{
int i8x8;
x264_analyse_update_cache( h, &analysis );
for( i8x8 = 0; i8x8 < 4; i8x8++ )
{
if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
{
x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8*4, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
{
x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
{
x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
{
x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
}
}
}
}
}
}
else if( h->sh.i_type == SLICE_TYPE_B )
{
int i_bskip_cost = COST_MAX;
int b_skip = 0;
if( analysis.i_mbrd )
x264_mb_cache_fenc_satd( h );
h->mb.i_type = B_SKIP;
if( h->mb.b_direct_auto_write )
{
/* direct=auto heuristic: prefer whichever mode allows more Skip macroblocks */
for( i = 0; i < 2; i++ )
{
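/* alternate temporal/spatial direct prediction; i_direct_score counts,
 * per frame, how many MBs each mode would let us skip */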
int b_changed = 1;
h->sh.b_direct_spatial_mv_pred ^= 1;
analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, i && analysis.b_direct_available ? &b_changed : NULL );
if( analysis.b_direct_available )
{
if( b_changed )
{
x264_mb_mc( h );
b_skip = x264_macroblock_probe_bskip( h );
}
h->stat.frame.i_direct_score[ h->sh.b_direct_spatial_mv_pred ] += b_skip;
}
else
b_skip = 0;
}
}
else
analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, NULL );
if( analysis.b_direct_available )
{
if( !h->mb.b_direct_auto_write )
x264_mb_mc( h );
if( h->mb.b_lossless )
{
/* chance of skip is too small to bother */
}
else if( analysis.i_mbrd )
{
i_bskip_cost = ssd_mb( h );
/* 6 = minimum cavlc cost of a non-skipped MB */
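/* i.e. take SKIP whenever the direct-MC SSD is no more than the
 * (lambda2-scaled, 1/256 fixed-point) rate cost of even an empty MB */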
b_skip = h->mb.b_skip_mc = i_bskip_cost <= ((6 * analysis.i_lambda2 + 128) >> 8);
}
else if( !h->mb.b_direct_auto_write )
{
/* Conditioning the probe on neighboring block types
* doesn't seem to help speed or quality. */
b_skip = x264_macroblock_probe_bskip( h );
}
}
if( !b_skip )
{
const unsigned int flags = h->param.analyse.inter;
int i_type;
int i_partition;
int i_satd_inter = 0; // shut up uninitialized warning
h->mb.b_skip_mc = 0;
x264_mb_analyse_load_costs( h, &analysis );
/* select best inter mode */
/* direct must be first */
if( analysis.b_direct_available )
x264_mb_analyse_inter_direct( h, &analysis );
x264_mb_analyse_inter_b16x16( h, &analysis );
i_type = B_L0_L0;
i_partition = D_16x16;
i_cost = analysis.l0.me16x16.cost;
COPY2_IF_LT( i_cost, analysis.l1.me16x16.cost, i_type, B_L1_L1 );
COPY2_IF_LT( i_cost, analysis.i_cost16x16bi, i_type, B_BI_BI );
COPY2_IF_LT( i_cost, analysis.i_cost16x16direct, i_type, B_DIRECT );
if( analysis.i_mbrd && analysis.i_cost16x16direct <= i_cost * 33/32 )
{
x264_mb_analyse_b_rd( h, &analysis, i_cost );
if( i_bskip_cost < analysis.i_rd16x16direct &&
i_bskip_cost < analysis.i_rd16x16bi &&
i_bskip_cost < analysis.l0.i_rd16x16 &&
i_bskip_cost < analysis.l1.i_rd16x16 )
{
h->mb.i_type = B_SKIP;
x264_analyse_update_cache( h, &analysis );
return;
}
}
if( flags & X264_ANALYSE_BSUB16x16 )
{
x264_mb_analyse_inter_b8x8( h, &analysis );
if( analysis.i_cost8x8bi < i_cost )
{
i_type = B_8x8;
i_partition = D_8x8;
i_cost = analysis.i_cost8x8bi;
if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[1] ||
h->mb.i_sub_partition[2] == h->mb.i_sub_partition[3] )
{
x264_mb_analyse_inter_b16x8( h, &analysis );
COPY3_IF_LT( i_cost, analysis.i_cost16x8bi,
i_type, analysis.i_mb_type16x8,
i_partition, D_16x8 );
}
if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[2] ||
h->mb.i_sub_partition[1] == h->mb.i_sub_partition[3] )
{
x264_mb_analyse_inter_b8x16( h, &analysis );
COPY3_IF_LT( i_cost, analysis.i_cost8x16bi,
i_type, analysis.i_mb_type8x16,
i_partition, D_8x16 );
}
}
}
if( analysis.i_mbrd )
{
/* refine later */
}
/* refine qpel */
else if( i_partition == D_16x16 )
{
analysis.l0.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
analysis.l1.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
if( i_type == B_L0_L0 )
{
x264_me_refine_qpel( h, &analysis.l0.me16x16 );
i_cost = analysis.l0.me16x16.cost
+ analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
}
else if( i_type == B_L1_L1 )
{
x264_me_refine_qpel( h, &analysis.l1.me16x16 );
i_cost = analysis.l1.me16x16.cost
+ analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
}
else if( i_type == B_BI_BI )
{
x264_me_refine_qpel( h, &analysis.l0.me16x16 );
x264_me_refine_qpel( h, &analysis.l1.me16x16 );
}
}
else if( i_partition == D_16x8 )
{
for( i=0; i<2; i++ )
{
if( analysis.i_mb_partition16x8[i] != D_L1_8x8 )
x264_me_refine_qpel( h, &analysis.l0.me16x8[i] );
if( analysis.i_mb_partition16x8[i] != D_L0_8x8 )
x264_me_refine_qpel( h, &analysis.l1.me16x8[i] );
}
}
else if( i_partition == D_8x16 )
{
for( i=0; i<2; i++ )
{
if( analysis.i_mb_partition8x16[i] != D_L1_8x8 )
x264_me_refine_qpel( h, &analysis.l0.me8x16[i] );
if( analysis.i_mb_partition8x16[i] != D_L0_8x8 )
x264_me_refine_qpel( h, &analysis.l1.me8x16[i] );
}
}
else if( i_partition == D_8x8 )
{
for( i=0; i<4; i++ )
{
x264_me_t *m;
int i_part_cost_old;
int i_type_cost;
int i_part_type = h->mb.i_sub_partition[i];
int b_bidir = (i_part_type == D_BI_8x8);
if( i_part_type == D_DIRECT_8x8 )
continue;
if( x264_mb_partition_listX_table[0][i_part_type] )
{
m = &analysis.l0.me8x8[i];
i_part_cost_old = m->cost;
i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
m->cost -= i_type_cost;
x264_me_refine_qpel( h, m );
if( !b_bidir )
analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
}
if( x264_mb_partition_listX_table[1][i_part_type] )
{
m = &analysis.l1.me8x8[i];
i_part_cost_old = m->cost;
i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];
m->cost -= i_type_cost;
x264_me_refine_qpel( h, m );
if( !b_bidir )
analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
}
/* TODO: update mvp? */
}
}
if( analysis.i_mbrd )
{
i_satd_inter = i_cost;
x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
i_type = B_SKIP;
i_cost = i_bskip_cost;
i_partition = D_16x16;
COPY2_IF_LT( i_cost, analysis.l0.i_rd16x16, i_type, B_L0_L0 );
COPY2_IF_LT( i_cost, analysis.l1.i_rd16x16, i_type, B_L1_L1 );
COPY2_IF_LT( i_cost, analysis.i_rd16x16bi, i_type, B_BI_BI );
COPY2_IF_LT( i_cost, analysis.i_rd16x16direct, i_type, B_DIRECT );
COPY3_IF_LT( i_cost, analysis.i_rd16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 );
COPY3_IF_LT( i_cost, analysis.i_rd8x16bi, i_type, analysis.i_mb_type8x16, i_partition, D_8x16 );
COPY3_IF_LT( i_cost, analysis.i_rd8x8bi, i_type, B_8x8, i_partition, D_8x8 );
h->mb.i_type = i_type;
h->mb.i_partition = i_partition;
}
x264_mb_analyse_intra( h, &analysis, i_satd_inter );
if( analysis.i_mbrd )
{
x264_mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
x264_intra_rd( h, &analysis, i_satd_inter * 17/16 );
}
COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );
COPY2_IF_LT( i_cost, analysis.i_satd_pcm, i_type, I_PCM );
h->mb.i_type = i_type;
h->mb.i_partition = i_partition;
if( analysis.i_mbrd >= 2 && IS_INTRA( i_type ) && i_type != I_PCM )
x264_intra_rd_refine( h, &analysis );
if( h->mb.i_subpel_refine >= 5 )
x264_refine_bidir( h, &analysis );
if( analysis.i_mbrd >= 2 && i_type > B_DIRECT && i_type < B_SKIP )
{
const int i_biweight = h->mb.bipred_weight[analysis.l0.i_ref][analysis.l1.i_ref];
x264_analyse_update_cache( h, &analysis );
if( i_partition == D_16x16 )
{
if( i_type == B_L0_L0 )
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
else if( i_type == B_L1_L1 )
x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
else if( i_type == B_BI_BI )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 );
}
else if( i_partition == D_16x8 )
{
for( i = 0; i < 2; i++ )
{
h->mb.i_sub_partition[i*2] = h->mb.i_sub_partition[i*2+1] = analysis.i_mb_partition16x8[i];
if( analysis.i_mb_partition16x8[i] == D_L0_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[i], analysis.i_lambda2, i*8, 0 );
else if( analysis.i_mb_partition16x8[i] == D_L1_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*8, 1 );
else if( analysis.i_mb_partition16x8[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x8[i], &analysis.l1.me16x8[i], i_biweight, i*2, analysis.i_lambda2 );
}
}
else if( i_partition == D_8x16 )
{
for( i = 0; i < 2; i++ )
{
h->mb.i_sub_partition[i] = h->mb.i_sub_partition[i+2] = analysis.i_mb_partition8x16[i];
if( analysis.i_mb_partition8x16[i] == D_L0_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[i], analysis.i_lambda2, i*4, 0 );
else if( analysis.i_mb_partition8x16[i] == D_L1_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i*4, 1 );
else if( analysis.i_mb_partition8x16[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me8x16[i], &analysis.l1.me8x16[i], i_biweight, i, analysis.i_lambda2 );
}
}
else if( i_partition == D_8x8 )
{
for( i = 0; i < 4; i++ )
{
if( h->mb.i_sub_partition[i] == D_L0_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i*4, 0 );
else if( h->mb.i_sub_partition[i] == D_L1_8x8 )
x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i*4, 1 );
else if( h->mb.i_sub_partition[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me8x8[i], &analysis.l1.me8x8[i], i_biweight, i, analysis.i_lambda2 );
}
}
}
}
}
x264_analyse_update_cache( h, &analysis );
if( !analysis.i_mbrd )
x264_mb_analyse_transform( h );
h->mb.b_trellis = h->param.analyse.i_trellis;
h->mb.b_noise_reduction = !!h->param.analyse.i_noise_reduction;
if( !IS_SKIP(h->mb.i_type) && h->mb.i_psy_trellis && h->param.analyse.i_trellis == 1 )
x264_psy_trellis_init( h, 0 );
if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
h->mb.i_skip_intra = 0;
}
/*-------------------- Update MB from the analysis ----------------------*/
static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a )
{
int i;
switch( h->mb.i_type )
{
case I_4x4:
for( i = 0; i < 16; i++ )
h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] = a->i_predict4x4[i];
x264_mb_analyse_intra_chroma( h, a );
break;
case I_8x8:
for( i = 0; i < 4; i++ )
x264_macroblock_cache_intra8x8_pred( h, 2*(i&1), 2*(i>>1), a->i_predict8x8[i] );
x264_mb_analyse_intra_chroma( h, a );
break;
case I_16x16:
h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
x264_mb_analyse_intra_chroma( h, a );
break;
case I_PCM:
break;
case P_L0:
switch( h->mb.i_partition )
{
case D_16x16:
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
break;
case D_16x8:
x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].i_ref );
x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].mv );
x264_macroblock_cache_mv_ptr( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].mv );
break;
case D_8x16:
x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].i_ref );
x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].mv );
x264_macroblock_cache_mv_ptr( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].mv );
break;
default:
x264_log( h, X264_LOG_ERROR, "internal error P_L0 and partition=%d\n", h->mb.i_partition );
break;
}
break;
case P_8x8:
x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref );
x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref );
x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref );
x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref );
for( i = 0; i < 4; i++ )
x264_mb_cache_mv_p8x8( h, a, i );
break;
case P_SKIP:
{
h->mb.i_partition = D_16x16;
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, h->mb.cache.pskip_mv );
break;
}
case B_SKIP:
case B_DIRECT:
x264_mb_load_mv_direct8x8( h, 0 );
x264_mb_load_mv_direct8x8( h, 1 );
x264_mb_load_mv_direct8x8( h, 2 );
x264_mb_load_mv_direct8x8( h, 3 );
break;
case B_8x8:
/* optimize: cache might not need to be rewritten */
for( i = 0; i < 4; i++ )
x264_mb_cache_mv_b8x8( h, a, i, 1 );
break;
default: /* the rest of the B types */
switch( h->mb.i_partition )
{
case D_16x16:
switch( h->mb.i_type )
{
case B_L0_L0:
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, -1 );
x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 1, 0 );
x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 1, 0 );
break;
case B_L1_L1:
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, -1 );
x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, 0 );
x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 0, 0 );
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv );
break;
case B_BI_BI:
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv );
break;
}
break;
case D_16x8:
x264_mb_cache_mv_b16x8( h, a, 0, 1 );
x264_mb_cache_mv_b16x8( h, a, 1, 1 );
break;
case D_8x16:
x264_mb_cache_mv_b8x16( h, a, 0, 1 );
x264_mb_cache_mv_b8x16( h, a, 1, 1 );
break;
default:
x264_log( h, X264_LOG_ERROR, "internal error (invalid MB type)\n" );
break;
}
}
#ifndef NDEBUG
if( h->param.i_threads > 1 && !IS_INTRA(h->mb.i_type) )
{
int l;
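/* frame-parallel threading: verify no cached MV points below the rows
 * the reference frame's thread has finished reconstructing */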
for( l=0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
{
int completed;
int ref = h->mb.cache.ref[l][x264_scan8[0]];
if( ref < 0 )
continue;
completed = (l ? h->fref1 : h->fref0)[ ref >> h->mb.b_interlaced ]->i_lines_completed;
if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - h->mb.b_interlaced)) + h->mb.i_mb_y*16 > completed )
{
x264_log( h, X264_LOG_WARNING, "internal error (MV out of thread range)\n");
fprintf(stderr, "mb type: %d \n", h->mb.i_type);
fprintf(stderr, "mv: l%dr%d (%d,%d) \n", l, ref,
h->mb.cache.mv[l][x264_scan8[15]][0],
h->mb.cache.mv[l][x264_scan8[15]][1] );
fprintf(stderr, "limit: %d \n", h->mb.mv_max_spel[1]);
fprintf(stderr, "mb_xy: %d,%d \n", h->mb.i_mb_x, h->mb.i_mb_y);
fprintf(stderr, "completed: %d \n", completed );
x264_log( h, X264_LOG_WARNING, "recovering by using intra mode\n");
x264_mb_analyse_intra( h, a, COST_MAX );
h->mb.i_type = I_16x16;
h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
x264_mb_analyse_intra_chroma( h, a );
}
}
}
#endif
}
#include "slicetype.c"
convolution_3x3_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
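// conv3x3s1_pack1to4: 3x3 stride-1 convolution from pack1 input (one
// float per pixel per channel) to pack4 output (4 output channels
// interleaved per pixel). Scalar reference of what the NEON/asm below
// computes for one output pixel (i, j) of packed output channel p,
// where in = bottom_blob.channel(q) and wgt = kernel.channel(p)
// (9 vectors of 4 floats per input channel q):
//
//     float sum[4];
//     for (int k = 0; k < 4; k++)
//         sum[k] = bias ? bias[p*4 + k] : 0.f;   // done once via out0.fill()
//     for (int q = 0; q < inch; q++)
//         for (int ky = 0; ky < 3; ky++)
//             for (int kx = 0; kx < 3; kx++)
//                 for (int k = 0; k < 4; k++)
//                     sum[k] += in(q, i+ky, j+kx) * wgt[q][(ky*3+kx)*4 + k];
//
// Each scalar input sample is broadcast (fmla by lane) against a 4-wide
// kernel vector, so a single fmla feeds all 4 output channels at once.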
static void conv3x3s1_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k10 = vld1q_f32(k0+12);
float32x4_t _k11 = vld1q_f32(k0+16);
float32x4_t _k12 = vld1q_f32(k0+20);
float32x4_t _k20 = vld1q_f32(k0+24);
float32x4_t _k21 = vld1q_f32(k0+28);
float32x4_t _k22 = vld1q_f32(k0+32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
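// unroll ladder: 8 outputs per iteration (aarch64 asm only), then 4
// (asm on both ISAs), then 2, then a 1-wide intrinsics tail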
#if __aarch64__
for (; j+7<outw; j+=8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %8.4s, v1.s[0] \n"
"fmla v29.4s, %8.4s, v1.s[1] \n"
"fmla v30.4s, %8.4s, v1.s[2] \n"
"fmla v31.4s, %8.4s, v1.s[3] \n"
"ld1 {v2.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %9.4s, v1.s[1] \n"
"fmla v29.4s, %9.4s, v1.s[2] \n"
"fmla v30.4s, %9.4s, v1.s[3] \n"
"fmla v31.4s, %9.4s, v2.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2], #32 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %10.4s, v1.s[2] \n"
"fmla v29.4s, %10.4s, v1.s[3] \n"
"fmla v30.4s, %10.4s, v2.s[0] \n"
"fmla v31.4s, %10.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v4.s[0] \n"
"fmla v25.4s, %11.4s, v4.s[1] \n"
"fmla v26.4s, %11.4s, v4.s[2] \n"
"fmla v27.4s, %11.4s, v4.s[3] \n"
"fmla v28.4s, %11.4s, v5.s[0] \n"
"fmla v29.4s, %11.4s, v5.s[1] \n"
"fmla v30.4s, %11.4s, v5.s[2] \n"
"fmla v31.4s, %11.4s, v5.s[3] \n"
"fmla v24.4s, %12.4s, v4.s[1] \n"
"fmla v25.4s, %12.4s, v4.s[2] \n"
"fmla v26.4s, %12.4s, v4.s[3] \n"
"fmla v27.4s, %12.4s, v5.s[0] \n"
"fmla v28.4s, %12.4s, v5.s[1] \n"
"fmla v29.4s, %12.4s, v5.s[2] \n"
"fmla v30.4s, %12.4s, v5.s[3] \n"
"fmla v31.4s, %12.4s, v2.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"fmla v24.4s, %13.4s, v4.s[2] \n"
"fmla v25.4s, %13.4s, v4.s[3] \n"
"fmla v26.4s, %13.4s, v5.s[0] \n"
"fmla v27.4s, %13.4s, v5.s[1] \n"
"fmla v28.4s, %13.4s, v5.s[2] \n"
"fmla v29.4s, %13.4s, v5.s[3] \n"
"fmla v30.4s, %13.4s, v2.s[0] \n"
"fmla v31.4s, %13.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %14.4s, v1.s[0] \n"
"fmla v29.4s, %14.4s, v1.s[1] \n"
"fmla v30.4s, %14.4s, v1.s[2] \n"
"fmla v31.4s, %14.4s, v1.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %15.4s, v1.s[1] \n"
"fmla v29.4s, %15.4s, v1.s[2] \n"
"fmla v30.4s, %15.4s, v1.s[3] \n"
"fmla v31.4s, %15.4s, v2.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %16.4s, v1.s[2] \n"
"fmla v29.4s, %16.4s, v1.s[3] \n"
"fmla v30.4s, %16.4s, v2.s[0] \n"
"fmla v31.4s, %16.4s, v2.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#endif // __aarch64__
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"ld1 {v1.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4s}, [%2], #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"ld1 {v3.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"ld1 {v1.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"vmla.f32 q12, %q8, d0[0] \n"
"vmla.f32 q13, %q8, d0[1] \n"
"vmla.f32 q14, %q8, d1[0] \n"
"vmla.f32 q15, %q8, d1[1] \n"
"vld1.f32 {d2}, [%1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"vmla.f32 q14, %q9, d1[1] \n"
"vmla.f32 q15, %q9, d2[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"vmla.f32 q12, %q10, d1[0] \n"
"vmla.f32 q13, %q10, d1[1] \n"
"vmla.f32 q14, %q10, d2[0] \n"
"vmla.f32 q15, %q10, d2[1] \n"
"vmla.f32 q12, %q11, d4[0] \n"
"vmla.f32 q13, %q11, d4[1] \n"
"vmla.f32 q14, %q11, d5[0] \n"
"vmla.f32 q15, %q11, d5[1] \n"
"vld1.f32 {d3}, [%2] \n"
"vmla.f32 q12, %q12, d4[1] \n"
"vmla.f32 q13, %q12, d5[0] \n"
"vmla.f32 q14, %q12, d5[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q12, %q13, d5[0] \n"
"vmla.f32 q13, %q13, d5[1] \n"
"vmla.f32 q14, %q13, d3[0] \n"
"vmla.f32 q15, %q13, d3[1] \n"
"vmla.f32 q12, %q14, d0[0] \n"
"vmla.f32 q13, %q14, d0[1] \n"
"vmla.f32 q14, %q14, d1[0] \n"
"vmla.f32 q15, %q14, d1[1] \n"
"vld1.f32 {d2}, [%3] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q15, d1[1] \n"
"vmla.f32 q15, %q15, d2[0] \n"
"vmla.f32 q12, %q16, d1[0] \n"
"vmla.f32 q13, %q16, d1[1] \n"
"vmla.f32 q14, %q16, d2[0] \n"
"vmla.f32 q15, %q16, d2[1] \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v24.4s, v25.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmul v26.4s, %8.4s, v0.s[0] \n"
"fmul v27.4s, %8.4s, v0.s[1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v1.4s}, [%2] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"add %1, %1, #8 \n"
"fmla v26.4s, %12.4s, v1.s[1] \n"
"fmla v27.4s, %12.4s, v1.s[2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3] \n"
"fmla v24.4s, %13.4s, v1.s[2] \n"
"fmla v25.4s, %13.4s, v1.s[3] \n"
"fmla v26.4s, %14.4s, v0.s[0] \n"
"fmla v27.4s, %14.4s, v0.s[1] \n"
"add %2, %2, #8 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"add %3, %3, #8 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"st1 {v24.4s, v25.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmul.f32 q14, %q8, d0[0] \n"
"vmul.f32 q15, %q8, d0[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d2-d3}, [%2] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vmla.f32 q12, %q11, d2[0] \n"
"vmla.f32 q13, %q11, d2[1] \n"
"add %1, %1, #8 \n"
"vmla.f32 q14, %q12, d2[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3] \n"
"vmla.f32 q12, %q13, d3[0] \n"
"vmla.f32 q13, %q13, d3[1] \n"
"vmla.f32 q14, %q14, d0[0] \n"
"vmla.f32 q15, %q14, d0[1] \n"
"add %2, %2, #8 \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"add %3, %3, #8 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
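// the j loops advanced r0/r1/r2 by outw; skip the 2-pixel 3x3 window
// border at the end of the row (w = outw + 2 here)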
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9*4;
}
}
}
static void conv3x3s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
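// stride 2: each output row consumes 2*outw input pixels horizontally
// and skips one full input row vertically, hence (w - 2*outw) + w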
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
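// aarch64: handle output channels in pairs so both accumulator sets
// reuse the same input rows loaded in each asm iteration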
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p+1) * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p+1);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00_0 = vld1q_f32(k0);
float32x4_t _k01_0 = vld1q_f32(k0+4);
float32x4_t _k02_0 = vld1q_f32(k0+8);
float32x4_t _k10_0 = vld1q_f32(k0+12);
float32x4_t _k11_0 = vld1q_f32(k0+16);
float32x4_t _k12_0 = vld1q_f32(k0+20);
float32x4_t _k20_0 = vld1q_f32(k0+24);
float32x4_t _k21_0 = vld1q_f32(k0+28);
float32x4_t _k22_0 = vld1q_f32(k0+32);
float32x4_t _k00_1 = vld1q_f32(k1);
float32x4_t _k01_1 = vld1q_f32(k1+4);
float32x4_t _k02_1 = vld1q_f32(k1+8);
float32x4_t _k10_1 = vld1q_f32(k1+12);
float32x4_t _k11_1 = vld1q_f32(k1+16);
float32x4_t _k12_1 = vld1q_f32(k1+20);
float32x4_t _k20_1 = vld1q_f32(k1+24);
float32x4_t _k21_1 = vld1q_f32(k1+28);
float32x4_t _k22_1 = vld1q_f32(k1+32);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0
// r0
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"ld1r {v4.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[0] \n"
"fmla v7.4s, %12.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n"// sum1
"fmla v8.4s, %12.4s, v1.s[0] \n"
"fmla v9.4s, %12.4s, v1.s[2] \n"
"fmla v10.4s, %21.4s, v0.s[0] \n"
"fmla v11.4s, %21.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v1.s[0] \n"
"fmla v13.4s, %21.4s, v1.s[2] \n"
"fmla v6.4s, %13.4s, v0.s[1] \n"
"fmla v7.4s, %13.4s, v0.s[3] \n"
"fmla v8.4s, %13.4s, v1.s[1] \n"
"fmla v9.4s, %13.4s, v1.s[3] \n"
"fmla v10.4s, %22.4s, v0.s[1] \n"
"fmla v11.4s, %22.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v1.s[1] \n"
"fmla v13.4s, %22.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4s, v3.4s}, [%4], #32 \n"
"ld1r {v5.4s}, [%4] \n"
"fmla v6.4s, %14.4s, v0.s[2] \n"
"fmla v7.4s, %14.4s, v1.s[0] \n"
"fmla v8.4s, %14.4s, v1.s[2] \n"
"fmla v9.4s, %14.4s, v4.s[0] \n"
"fmla v10.4s, %23.4s, v0.s[2] \n"
"fmla v11.4s, %23.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v1.s[2] \n"
"fmla v13.4s, %23.4s, v4.s[0] \n"
"fmla v6.4s, %15.4s, v2.s[0] \n"
"fmla v7.4s, %15.4s, v2.s[2] \n"
"fmla v8.4s, %15.4s, v3.s[0] \n"
"fmla v9.4s, %15.4s, v3.s[2] \n"
"fmla v10.4s, %24.4s, v2.s[0] \n"
"fmla v11.4s, %24.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v3.s[0] \n"
"fmla v13.4s, %24.4s, v3.s[2] \n"
"fmla v6.4s, %16.4s, v2.s[1] \n"
"fmla v7.4s, %16.4s, v2.s[3] \n"
"fmla v8.4s, %16.4s, v3.s[1] \n"
"fmla v9.4s, %16.4s, v3.s[3] \n"
"fmla v10.4s, %25.4s, v2.s[1] \n"
"fmla v11.4s, %25.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v3.s[1] \n"
"fmla v13.4s, %25.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"
"ld1r {v4.4s}, [%5] \n"
"fmla v6.4s, %17.4s, v2.s[2] \n"
"fmla v7.4s, %17.4s, v3.s[0] \n"
"fmla v8.4s, %17.4s, v3.s[2] \n"
"fmla v9.4s, %17.4s, v5.s[0] \n"
"fmla v10.4s, %26.4s, v2.s[2] \n"
"fmla v11.4s, %26.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v3.s[2] \n"
"fmla v13.4s, %26.4s, v5.s[0] \n"
"fmla v6.4s, %18.4s, v0.s[0] \n"
"fmla v7.4s, %18.4s, v0.s[2] \n"
"fmla v8.4s, %18.4s, v1.s[0] \n"
"fmla v9.4s, %18.4s, v1.s[2] \n"
"fmla v10.4s, %27.4s, v0.s[0] \n"
"fmla v11.4s, %27.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v1.s[0] \n"
"fmla v13.4s, %27.4s, v1.s[2] \n"
"fmla v6.4s, %19.4s, v0.s[1] \n"
"fmla v7.4s, %19.4s, v0.s[3] \n"
"fmla v8.4s, %19.4s, v1.s[1] \n"
"fmla v9.4s, %19.4s, v1.s[3] \n"
"fmla v10.4s, %28.4s, v0.s[1] \n"
"fmla v11.4s, %28.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v1.s[1] \n"
"fmla v13.4s, %28.4s, v1.s[3] \n"
"fmla v6.4s, %20.4s, v0.s[2] \n"
"fmla v7.4s, %20.4s, v1.s[0] \n"
"fmla v8.4s, %20.4s, v1.s[2] \n"
"fmla v9.4s, %20.4s, v4.s[0] \n"
"fmla v10.4s, %29.4s, v0.s[2] \n"
"fmla v11.4s, %29.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v1.s[2] \n"
"fmla v13.4s, %29.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"
);
}
for (; remain>0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr1);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr1 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9*4;
k1 += 9*4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k10 = vld1q_f32(k0+12);
float32x4_t _k11 = vld1q_f32(k0+16);
float32x4_t _k12 = vld1q_f32(k0+20);
float32x4_t _k20 = vld1q_f32(k0+24);
float32x4_t _k21 = vld1q_f32(k0+28);
float32x4_t _k22 = vld1q_f32(k0+32);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"
"ld1r {v4.4s}, [%2] \n"
"fmla v6.4s, %10.4s, v0.s[0] \n"
"fmla v7.4s, %10.4s, v0.s[2] \n"
"fmla v8.4s, %10.4s, v1.s[0] \n"
"fmla v9.4s, %10.4s, v1.s[2] \n"
"fmla v6.4s, %11.4s, v0.s[1] \n"
"fmla v7.4s, %11.4s, v0.s[3] \n"
"fmla v8.4s, %11.4s, v1.s[1] \n"
"fmla v9.4s, %11.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4s, v3.4s}, [%3], #32 \n"
"ld1r {v5.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"fmla v8.4s, %12.4s, v1.s[2] \n"
"fmla v9.4s, %12.4s, v4.s[0] \n"
"fmla v6.4s, %13.4s, v2.s[0] \n"
"fmla v7.4s, %13.4s, v2.s[2] \n"
"fmla v8.4s, %13.4s, v3.s[0] \n"
"fmla v9.4s, %13.4s, v3.s[2] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"fmla v8.4s, %14.4s, v3.s[1] \n"
"fmla v9.4s, %14.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"
"ld1r {v4.4s}, [%4] \n"
"fmla v6.4s, %15.4s, v2.s[2] \n"
"fmla v7.4s, %15.4s, v3.s[0] \n"
"fmla v8.4s, %15.4s, v3.s[2] \n"
"fmla v9.4s, %15.4s, v5.s[0] \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"fmla v8.4s, %16.4s, v1.s[0] \n"
"fmla v9.4s, %16.4s, v1.s[2] \n"
"fmla v6.4s, %17.4s, v0.s[1] \n"
"fmla v7.4s, %17.4s, v0.s[3] \n"
"fmla v8.4s, %17.4s, v1.s[1] \n"
"fmla v9.4s, %17.4s, v1.s[3] \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fmla v8.4s, %18.4s, v1.s[2] \n"
"fmla v9.4s, %18.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"
);
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #512] \n"
"vldm %1, {d0-d7} \n"// sum0
// r0
"pld [%2, #256] \n"
"vld1.f32 {d8-d11}, [%2]! \n"
"vld1.f32 {d12[]}, [%2] \n"
"vmla.f32 q0, %q10, d8[0] \n"
"vmla.f32 q1, %q10, d9[0] \n"
"vmla.f32 q2, %q10, d10[0] \n"
"vmla.f32 q3, %q10, d11[0] \n"
"vmla.f32 q0, %q11, d8[1] \n"
"vmla.f32 q1, %q11, d9[1] \n"
"vmla.f32 q2, %q11, d10[1] \n"
"vmla.f32 q3, %q11, d11[1] \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vmla.f32 q2, %q12, d11[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d8-d11}, [%3]! \n"
"vld1.f32 {d13[]}, [%3] \n"
"vmla.f32 q3, %q12, d12[0] \n"
"vmla.f32 q0, %q13, d8[0] \n"
"vmla.f32 q1, %q13, d9[0] \n"
"vmla.f32 q2, %q13, d10[0] \n"
"vmla.f32 q3, %q13, d11[0] \n"
"vmla.f32 q0, %q14, d8[1] \n"
"vmla.f32 q1, %q14, d9[1] \n"
"vmla.f32 q2, %q14, d10[1] \n"
"vmla.f32 q3, %q14, d11[1] \n"
"vmla.f32 q0, %q15, d9[0] \n"
"vmla.f32 q1, %q15, d10[0] \n"
"vmla.f32 q2, %q15, d11[0] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4]! \n"
"vld1.f32 {d12[]}, [%4] \n"
"vmla.f32 q3, %q15, d13[0] \n"
"vmla.f32 q0, %q16, d8[0] \n"
"vmla.f32 q1, %q16, d9[0] \n"
"vmla.f32 q2, %q16, d10[0] \n"
"vmla.f32 q3, %q16, d11[0] \n"
"vmla.f32 q0, %q17, d8[1] \n"
"vmla.f32 q1, %q17, d9[1] \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vmla.f32 q0, %q18, d9[0] \n"
"vmla.f32 q1, %q18, d10[0] \n"
"vmla.f32 q2, %q18, d11[0] \n"
"vmla.f32 q3, %q18, d12[0] \n"
"subs %0, %0, #1 \n"
"vstm %1!, {d0-d7} \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
}
#endif // __aarch64__
for (; remain>0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9*4;
}
}
}
|
gemm_cmpt.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "sys/time.h"
#define min(X,Y) (((X) < (Y)) ? (X) : (Y))
#define GEMM_BATCH cblas_dgemm_batch
#define GEMM_COMPUTE_BATCH cblas_dgemm_compute_batch
#define fabst fabs
#define FPTYPE double
#define VECTOR_LENGTH 8
#define REPEAT 1000
#define MEM_ALIGNMENT 64
#define GRP_COUNT 1
int main(int argc, char *argv[])
{
int grp, i, j, ii, jj, nthr;
#pragma omp parallel
{
nthr = omp_get_num_threads();
}
unsigned long op_count = 0;
double startt, stopt, mint, mints;
mint = 1e5;
mints = 1e5;
int group_count = GRP_COUNT;
int pack_length = VECTOR_LENGTH;
int m_init, grp_init;
if (argc > 1) m_init = atoi(argv[1]);
else m_init = 5;
if (argc > 2) grp_init = atoi(argv[2]);
else grp_init = 512;
MKL_INT m[GRP_COUNT] = {m_init};
MKL_INT k[GRP_COUNT] = {m_init};
MKL_INT n[GRP_COUNT] = {m_init};
MKL_INT lda[GRP_COUNT] = {m_init};
MKL_INT ldb[GRP_COUNT] = {m_init};
MKL_INT ldc[GRP_COUNT] = {m_init};
FPTYPE alpha[GRP_COUNT] = {1.0};
FPTYPE beta[GRP_COUNT] = {1.0};
MKL_INT size_per_grp[GRP_COUNT] = {grp_init};
MKL_INT format_tail;
CBLAS_TRANSPOSE TRANSA[GRP_COUNT] = {CblasNoTrans};
CBLAS_TRANSPOSE TRANSB[GRP_COUNT] = {CblasNoTrans};
int num_pointers = 0;
for (i = 0; i < GRP_COUNT; i++) num_pointers += size_per_grp[i];
int a_total = 0;
int b_total = 0;
int c_total = 0;
for (i = 0; i < GRP_COUNT; i++) a_total += m[i] * k[i] * size_per_grp[i];
for (i = 0; i < GRP_COUNT; i++) b_total += k[i] * n[i] * size_per_grp[i];
for (i = 0; i < GRP_COUNT; i++) c_total += m[i] * n[i] * size_per_grp[i];
FPTYPE *a, *b, *c, *d;
a = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT );
b = (FPTYPE *)_mm_malloc( b_total * sizeof(FPTYPE), MEM_ALIGNMENT );
c = (FPTYPE *)_mm_malloc( c_total * sizeof(FPTYPE), MEM_ALIGNMENT );
d = (FPTYPE *)_mm_malloc( c_total * sizeof(FPTYPE), MEM_ALIGNMENT );
for (i = 0; i < a_total; i++) a[i] = rand() / (FPTYPE) RAND_MAX + .5;
for (i = 0; i < b_total; i++) b[i] = rand() / (FPTYPE) RAND_MAX + .5;
int ci = 0;
for (i = 0; i < c_total; i++) {
c[i] = ci;
d[i] = c[i];
ci++;
}
FPTYPE *a_array[num_pointers], *b_array[num_pointers], *c_array[num_pointers], *d_array[num_pointers];
int a_idx = 0;
int b_idx = 0;
int c_idx = 0;
int p_num = 0;
for (i = 0; i < GRP_COUNT; i++) {
for (j = 0; j < size_per_grp[i]; j++) {
a_array[p_num] = &a[ a_idx ];
b_array[p_num] = &b[ b_idx ];
c_array[p_num] = &c[ c_idx ];
d_array[p_num] = &d[ c_idx ];
p_num++;
a_idx += m[i] * k[i];
b_idx += k[i] * n[i];
c_idx += m[i] * n[i];
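// each m x n x k GEMM costs 2*m*n*k flops (one multiply and one add per term)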
op_count += 2 * m[i] * n[i] * k[i];
}
}
// setup packed arrays
int grp_idx = 0;
int A_p_idx = 0;
int B_p_idx = 0;
int C_p_idx = 0;
int i_grp, i_format;
FPTYPE *a_packed, *b_packed, *c_packed;
a_packed = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT );
b_packed = (FPTYPE *)_mm_malloc( b_total * sizeof(FPTYPE), MEM_ALIGNMENT );
c_packed = (FPTYPE *)_mm_malloc( c_total * sizeof(FPTYPE), MEM_ALIGNMENT );
// setup compact_t structs
compact_t A_p, B_p, C_p;
A_p.layout = CblasColMajor;
A_p.rows = m;
A_p.cols = k;
A_p.stride = lda;
A_p.group_count = GRP_COUNT;
A_p.size_per_group = size_per_grp;
A_p.format = VECTOR_LENGTH;
A_p.mat = a_packed;
B_p.layout = CblasColMajor;
B_p.rows = k;
B_p.cols = n;
B_p.stride = ldb;
B_p.group_count = GRP_COUNT;
B_p.size_per_group = size_per_grp;
B_p.format = VECTOR_LENGTH;
B_p.mat = b_packed;
C_p.layout = CblasColMajor;
C_p.rows = m;
C_p.cols = n;
C_p.stride = ldc;
C_p.group_count = GRP_COUNT;
C_p.size_per_group = size_per_grp;
C_p.format = VECTOR_LENGTH;
C_p.mat = c_packed;
// pack the matrices in VECTOR_LENGTH chunks
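// interleaved layout: for a fixed matrix entry (i,j), the values from VECTOR_LENGTH
// consecutive matrices are stored contiguously, i.e. for column-major groups of 8:
// a_packed = [A0(0,0), A1(0,0), ..., A7(0,0), A0(1,0), A1(1,0), ...]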
for (i_grp=0; i_grp<GRP_COUNT; i_grp++) {
for (i_format=0; i_format<(size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH; i_format+=VECTOR_LENGTH) {
for (j=0; j<A_p.cols[i_grp]; j++) {
for (i=0; i<A_p.rows[i_grp]; i++) {
for (ii=0; ii<VECTOR_LENGTH; ii++) {
a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i];
}
A_p_idx+=VECTOR_LENGTH;
}
}
for (j=0; j<B_p.cols[i_grp]; j++) {
for (i=0; i<B_p.rows[i_grp]; i++) {
for (ii=0; ii<VECTOR_LENGTH; ii++) {
b_packed[ii + B_p_idx] = b_array[ii + grp_idx + i_format][B_p.rows[i_grp]*j + i];
}
B_p_idx+=VECTOR_LENGTH;
}
}
for (j=0; j<C_p.cols[i_grp]; j++) {
for (i=0; i<C_p.rows[i_grp]; i++) {
for (ii=0; ii<VECTOR_LENGTH; ii++) {
c_packed[ii + C_p_idx] = c_array[ii + grp_idx + i_format][C_p.rows[i_grp]*j + i];
}
C_p_idx+=VECTOR_LENGTH;
}
}
}
// tail handling
format_tail = size_per_grp[i_grp] - i_format;
i_format = (size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH;
if(format_tail > 0) {
for (j=0; j<A_p.cols[i_grp]; j++) {
for (i=0; i<A_p.rows[i_grp]; i++) {
for (ii=0; ii<format_tail; ii++) {
a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i];
}
A_p_idx+=format_tail;
}
}
for (j=0; j<B_p.cols[i_grp]; j++) {
for (i=0; i<B_p.rows[i_grp]; i++) {
for (ii=0; ii<format_tail; ii++) {
b_packed[ii + B_p_idx] = b_array[ii + grp_idx + i_format][B_p.rows[i_grp]*j + i];
}
B_p_idx+=format_tail;
}
}
for (j=0; j<C_p.cols[i_grp]; j++) {
for (i=0; i<C_p.rows[i_grp]; i++) {
for (ii=0; ii<format_tail; ii++) {
c_packed[ii + C_p_idx] = c_array[ii + grp_idx + i_format][C_p.rows[i_grp]*j + i];
}
C_p_idx+=format_tail;
}
}
}
grp_idx += size_per_grp[i_grp];
}
printf("\n THREADS --- m = k = n --- GEMM_BATCH --- GEMM_BATCH_COMPUTE\n");
for (i = 0; i < REPEAT; i++) {
startt = omp_get_wtime();
GEMM_BATCH( CblasColMajor, TRANSA, TRANSB, m, n, k, alpha, (const FPTYPE**)a_array, lda, (const FPTYPE**)b_array, ldb, beta, d_array, ldc, GRP_COUNT, size_per_grp );
stopt = omp_get_wtime();
mint = min(mint, (stopt - startt));
}
int one=1;
for (i = 0; i < REPEAT; i++) {
startt = omp_get_wtime();
GEMM_COMPUTE_BATCH( TRANSA, TRANSB, alpha, &A_p, &B_p, beta, &C_p );
stopt = omp_get_wtime();
mints = min(mints, (stopt - startt));
}
printf(" %d --- %d --- %5.2f --- %5.2f\n", nthr, m_init, (double)op_count/(mint*1e9), (double)op_count/(mints*1e9));
_mm_free(a_packed);
_mm_free(b_packed);
_mm_free(c_packed);
_mm_free(a);
_mm_free(b);
_mm_free(c);
_mm_free(d);
return 0;
}
|
editmesh_utils.c | /*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2004 by Blender Foundation.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): Joseph Eagar
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/editors/mesh/editmesh_utils.c
* \ingroup edmesh
*/
#include "MEM_guardedalloc.h"
#include "DNA_mesh_types.h"
#include "DNA_object_types.h"
#include "DNA_key_types.h"
#include "BLI_math.h"
#include "BLI_alloca.h"
#include "BLI_buffer.h"
#include "BLI_kdtree.h"
#include "BLI_listbase.h"
#include "BKE_DerivedMesh.h"
#include "BKE_context.h"
#include "BKE_global.h"
#include "BKE_depsgraph.h"
#include "BKE_main.h"
#include "BKE_mesh.h"
#include "BKE_mesh_mapping.h"
#include "BKE_report.h"
#include "BKE_editmesh.h"
#include "BKE_editmesh_bvh.h"
#include "BKE_object.h" /* XXX. only for EDBM_mesh_ensure_valid_dm_hack() which will be removed */
#include "WM_api.h"
#include "WM_types.h"
#include "ED_mesh.h"
#include "ED_screen.h"
#include "ED_view3d.h"
#include "mesh_intern.h" /* own include */
/* Mesh backup implementation. This would greatly benefit from some sort of binary diffing,
 * just as the undo stack would. So this is left as an interface for further work */
BMBackup EDBM_redo_state_store(BMEditMesh *em)
{
BMBackup backup;
backup.bmcopy = BM_mesh_copy(em->bm);
return backup;
}
void EDBM_redo_state_restore(BMBackup backup, BMEditMesh *em, int recalctess)
{
BMesh *tmpbm;
if (!em || !backup.bmcopy)
return;
BM_mesh_data_free(em->bm);
tmpbm = BM_mesh_copy(backup.bmcopy);
*em->bm = *tmpbm;
MEM_freeN(tmpbm);
tmpbm = NULL;
if (recalctess)
BKE_editmesh_tessface_calc(em);
}
void EDBM_redo_state_free(BMBackup *backup, BMEditMesh *em, int recalctess)
{
if (em && backup->bmcopy) {
BM_mesh_data_free(em->bm);
*em->bm = *backup->bmcopy;
}
else if (backup->bmcopy) {
BM_mesh_data_free(backup->bmcopy);
}
if (backup->bmcopy)
MEM_freeN(backup->bmcopy);
backup->bmcopy = NULL;
if (recalctess && em)
BKE_editmesh_tessface_calc(em);
}
/* hack to work around multiple operators being called within the same event loop without an update
 * see: [#31811] */
void EDBM_mesh_ensure_valid_dm_hack(Scene *scene, BMEditMesh *em)
{
if ((((ID *)em->ob->data)->tag & LIB_TAG_ID_RECALC) ||
(em->ob->recalc & OB_RECALC_DATA))
{
/* since we may not have done selection flushing */
if ((em->ob->recalc & OB_RECALC_DATA) == 0) {
DAG_id_tag_update(&em->ob->id, OB_RECALC_DATA);
}
BKE_object_handle_update(G.main->eval_ctx, scene, em->ob);
}
}
void EDBM_mesh_normals_update(BMEditMesh *em)
{
BM_mesh_normals_update(em->bm);
}
void EDBM_mesh_clear(BMEditMesh *em)
{
/* clear bmesh */
BM_mesh_clear(em->bm);
/* free derived meshes */
BKE_editmesh_free_derivedmesh(em);
/* free tessellation data */
em->tottri = 0;
if (em->looptris) {
MEM_freeN(em->looptris);
em->looptris = NULL;
}
}
void EDBM_stats_update(BMEditMesh *em)
{
const char iter_types[3] = {BM_VERTS_OF_MESH,
BM_EDGES_OF_MESH,
BM_FACES_OF_MESH};
BMIter iter;
BMElem *ele;
int *tots[3];
int i;
tots[0] = &em->bm->totvertsel;
tots[1] = &em->bm->totedgesel;
tots[2] = &em->bm->totfacesel;
em->bm->totvertsel = em->bm->totedgesel = em->bm->totfacesel = 0;
for (i = 0; i < 3; i++) {
ele = BM_iter_new(&iter, em->bm, iter_types[i], NULL);
for ( ; ele; ele = BM_iter_step(&iter)) {
if (BM_elem_flag_test(ele, BM_ELEM_SELECT)) {
(*tots[i])++;
}
}
}
}
DerivedMesh *EDBM_mesh_deform_dm_get(BMEditMesh *em)
{
return ((em->derivedFinal != NULL) &&
(em->derivedFinal->type == DM_TYPE_EDITBMESH) &&
(em->derivedFinal->deformedOnly != false)) ? em->derivedFinal : NULL;
}
bool EDBM_op_init(BMEditMesh *em, BMOperator *bmop, wmOperator *op, const char *fmt, ...)
{
BMesh *bm = em->bm;
va_list list;
va_start(list, fmt);
if (!BMO_op_vinitf(bm, bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__);
va_end(list);
return false;
}
if (!em->emcopy)
em->emcopy = BKE_editmesh_copy(em);
em->emcopyusers++;
va_end(list);
return true;
}
/* returns false on error, true on success; executes and finishes a bmesh operator */
bool EDBM_op_finish(BMEditMesh *em, BMOperator *bmop, wmOperator *op, const bool do_report)
{
const char *errmsg;
BMO_op_finish(em->bm, bmop);
if (BMO_error_get(em->bm, &errmsg, NULL)) {
BMEditMesh *emcopy = em->emcopy;
if (do_report) {
BKE_report(op->reports, RPT_ERROR, errmsg);
}
EDBM_mesh_free(em);
*em = *emcopy;
MEM_freeN(emcopy);
em->emcopyusers = 0;
em->emcopy = NULL;
/* when copying, tessellation data isn't copied (for faster copying),
 * which means we need to re-tessellate here */
if (em->looptris == NULL) {
BKE_editmesh_tessface_calc(em);
}
return false;
}
else {
em->emcopyusers--;
if (em->emcopyusers < 0) {
printf("warning: em->emcopyusers was less than zero.\n");
}
if (em->emcopyusers <= 0) {
BKE_editmesh_free(em->emcopy);
MEM_freeN(em->emcopy);
em->emcopy = NULL;
}
return true;
}
}
bool EDBM_op_callf(BMEditMesh *em, wmOperator *op, const char *fmt, ...)
{
BMesh *bm = em->bm;
BMOperator bmop;
va_list list;
va_start(list, fmt);
if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__);
va_end(list);
return false;
}
if (!em->emcopy)
em->emcopy = BKE_editmesh_copy(em);
em->emcopyusers++;
BMO_op_exec(bm, &bmop);
va_end(list);
return EDBM_op_finish(em, &bmop, op, true);
}
bool EDBM_op_call_and_selectf(BMEditMesh *em, wmOperator *op,
const char *select_slot_out, const bool select_extend,
const char *fmt, ...)
{
BMOpSlot *slot_select_out;
BMesh *bm = em->bm;
BMOperator bmop;
va_list list;
char hflag;
va_start(list, fmt);
if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__);
va_end(list);
return false;
}
if (!em->emcopy)
em->emcopy = BKE_editmesh_copy(em);
em->emcopyusers++;
BMO_op_exec(bm, &bmop);
slot_select_out = BMO_slot_get(bmop.slots_out, select_slot_out);
hflag = slot_select_out->slot_subtype.elem & BM_ALL_NOLOOP;
BLI_assert(hflag != 0);
if (select_extend == false) {
BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false);
}
BMO_slot_buffer_hflag_enable(em->bm, bmop.slots_out, select_slot_out, hflag, BM_ELEM_SELECT, true);
va_end(list);
return EDBM_op_finish(em, &bmop, op, true);
}
bool EDBM_op_call_silentf(BMEditMesh *em, const char *fmt, ...)
{
BMesh *bm = em->bm;
BMOperator bmop;
va_list list;
va_start(list, fmt);
if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
va_end(list);
return false;
}
if (!em->emcopy)
em->emcopy = BKE_editmesh_copy(em);
em->emcopyusers++;
BMO_op_exec(bm, &bmop);
va_end(list);
return EDBM_op_finish(em, &bmop, NULL, false);
}
void EDBM_selectmode_to_scene(bContext *C)
{
Scene *scene = CTX_data_scene(C);
Object *obedit = CTX_data_edit_object(C);
BMEditMesh *em = BKE_editmesh_from_object(obedit);
if (!em)
return;
scene->toolsettings->selectmode = em->selectmode;
/* Request redraw of header buttons (to show new select mode) */
WM_event_add_notifier(C, NC_SCENE | ND_TOOLSETTINGS, scene);
}
void EDBM_mesh_make(ToolSettings *ts, Object *ob, const bool add_key_index)
{
Mesh *me = ob->data;
BMesh *bm;
if (UNLIKELY(!me->mpoly && me->totface)) {
BKE_mesh_convert_mfaces_to_mpolys(me);
}
bm = BKE_mesh_to_bmesh(
me, ob, add_key_index,
&((struct BMeshCreateParams){.use_toolflags = true,}));
if (me->edit_btmesh) {
/* this happens when switching shape keys */
EDBM_mesh_free(me->edit_btmesh);
MEM_freeN(me->edit_btmesh);
}
/* currently executing operators re-tessellates, so we can avoid doing here
* but at some point it may need to be added back. */
#if 0
me->edit_btmesh = BKE_editmesh_create(bm, true);
#else
me->edit_btmesh = BKE_editmesh_create(bm, false);
#endif
me->edit_btmesh->selectmode = me->edit_btmesh->bm->selectmode = ts->selectmode;
me->edit_btmesh->mat_nr = (ob->actcol > 0) ? ob->actcol - 1 : 0;
me->edit_btmesh->ob = ob;
/* we need to flush selection because the mode may have changed from when last in editmode */
EDBM_selectmode_flush(me->edit_btmesh);
}
/**
* \warning This can invalidate the #DerivedMesh cache of other objects (for linked duplicates).
* Most callers should run #DAG_id_tag_update on \a ob->data, see: T46738, T46913
*/
void EDBM_mesh_load(Object *ob)
{
Mesh *me = ob->data;
BMesh *bm = me->edit_btmesh->bm;
/* Workaround for T42360, 'ob->shapenr' should be 1 in this case.
 * However, this isn't synchronized between objects at the moment. */
if (UNLIKELY((ob->shapenr == 0) && (me->key && !BLI_listbase_is_empty(&me->key->block)))) {
bm->shapenr = 1;
}
BM_mesh_bm_to_me(bm, me, (&(struct BMeshToMeshParams){0}));
#ifdef USE_TESSFACE_DEFAULT
BKE_mesh_tessface_calc(me);
#endif
/* Free the derived mesh. Usually this would happen through the depsgraph, but there
 * are exceptions like file save that will not cause this, and we want to
 * avoid ending up with an invalid derived mesh then.
 *
 * Do it for all objects which share the same mesh datablock, since their
 * derived meshes might also be referencing data which was just freed.
 *
 * Annoying enough, but currently this seems the most efficient way to avoid
 * accessing freed data on scene update, especially in cases where there are
 * dependency cycles.
 */
for (Object *other_object = G.main->object.first;
other_object != NULL;
other_object = other_object->id.next)
{
if (other_object->data == ob->data) {
BKE_object_free_derived_caches(other_object);
}
}
}
/**
* Should only be called on the active editmesh, otherwise call #BKE_editmesh_free
*/
void EDBM_mesh_free(BMEditMesh *em)
{
/* These tables aren't used yet, so it's not strictly necessary
* to 'end' them (with 'e' param) but if someone tries to start
* using them, having these in place will save a lot of pain */
ED_mesh_mirror_spatial_table(NULL, NULL, NULL, NULL, 'e');
ED_mesh_mirror_topo_table(NULL, NULL, 'e');
BKE_editmesh_free(em);
}
void EDBM_selectmode_flush_ex(BMEditMesh *em, const short selectmode)
{
BM_mesh_select_mode_flush_ex(em->bm, selectmode);
}
void EDBM_selectmode_flush(BMEditMesh *em)
{
EDBM_selectmode_flush_ex(em, em->selectmode);
}
void EDBM_deselect_flush(BMEditMesh *em)
{
/* the function below doesn't use em->selectmode; just set it here to keep the values in sync */
em->bm->selectmode = em->selectmode;
BM_mesh_deselect_flush(em->bm);
}
void EDBM_select_flush(BMEditMesh *em)
{
/* the function below doesn't use em->selectmode; just set it here to keep the values in sync */
em->bm->selectmode = em->selectmode;
BM_mesh_select_flush(em->bm);
}
void EDBM_select_more(BMEditMesh *em, const bool use_face_step)
{
BMOperator bmop;
const bool use_faces = (em->selectmode == SCE_SELECT_FACE);
BMO_op_initf(em->bm, &bmop, BMO_FLAG_DEFAULTS,
"region_extend geom=%hvef use_contract=%b use_faces=%b use_face_step=%b",
BM_ELEM_SELECT, false, use_faces, use_face_step);
BMO_op_exec(em->bm, &bmop);
/* don't flush selection in edge/vertex mode */
BMO_slot_buffer_hflag_enable(em->bm, bmop.slots_out, "geom.out", BM_ALL_NOLOOP, BM_ELEM_SELECT, use_faces ? true : false);
BMO_op_finish(em->bm, &bmop);
EDBM_selectmode_flush(em);
}
void EDBM_select_less(BMEditMesh *em, const bool use_face_step)
{
BMOperator bmop;
const bool use_faces = (em->selectmode == SCE_SELECT_FACE);
BMO_op_initf(em->bm, &bmop, BMO_FLAG_DEFAULTS,
"region_extend geom=%hvef use_contract=%b use_faces=%b use_face_step=%b",
BM_ELEM_SELECT, true, use_faces, use_face_step);
BMO_op_exec(em->bm, &bmop);
/* don't flush selection in edge/vertex mode */
BMO_slot_buffer_hflag_disable(em->bm, bmop.slots_out, "geom.out", BM_ALL_NOLOOP, BM_ELEM_SELECT, use_faces ? true : false);
BMO_op_finish(em->bm, &bmop);
EDBM_selectmode_flush(em);
/* only needed for select less, ensure we don't have isolated elements remaining */
BM_mesh_select_mode_clean(em->bm);
}
void EDBM_flag_disable_all(BMEditMesh *em, const char hflag)
{
BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, hflag, false);
}
void EDBM_flag_enable_all(BMEditMesh *em, const char hflag)
{
BM_mesh_elem_hflag_enable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, hflag, true);
}
/**
* Return a new UVVertMap from the editmesh
*/
UvVertMap *BM_uv_vert_map_create(
BMesh *bm,
const float limit[2], const bool use_select, const bool use_winding)
{
BMVert *ev;
BMFace *efa;
BMLoop *l;
BMIter iter, liter;
/* vars from original func */
UvVertMap *vmap;
UvMapVert *buf;
/* MTexPoly *tf; */ /* UNUSED */
MLoopUV *luv;
unsigned int a;
int totverts, i, totuv, totfaces;
const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
bool *winding = NULL;
BLI_buffer_declare_static(vec2f, tf_uv_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
BM_mesh_elem_index_ensure(bm, BM_VERT | BM_FACE);
totfaces = bm->totface;
totverts = bm->totvert;
totuv = 0;
/* generate UvMapVert array */
BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
if ((use_select == false) || BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
totuv += efa->len;
}
}
if (totuv == 0) {
return NULL;
}
vmap = (UvVertMap *)MEM_callocN(sizeof(*vmap), "UvVertMap");
if (!vmap) {
return NULL;
}
vmap->vert = (UvMapVert **)MEM_callocN(sizeof(*vmap->vert) * totverts, "UvMapVert_pt");
buf = vmap->buf = (UvMapVert *)MEM_callocN(sizeof(*vmap->buf) * totuv, "UvMapVert");
if (use_winding) {
winding = MEM_callocN(sizeof(*winding) * totfaces, "winding");
}
if (!vmap->vert || !vmap->buf) {
BKE_mesh_uv_vert_map_free(vmap);
return NULL;
}
BM_ITER_MESH_INDEX (efa, &iter, bm, BM_FACES_OF_MESH, a) {
if ((use_select == false) || BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
float (*tf_uv)[2];
if (use_winding) {
tf_uv = (float (*)[2])BLI_buffer_reinit_data(&tf_uv_buf, vec2f, efa->len);
}
BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i) {
buf->tfindex = i;
buf->f = a;
buf->separate = 0;
buf->next = vmap->vert[BM_elem_index_get(l->v)];
vmap->vert[BM_elem_index_get(l->v)] = buf;
buf++;
if (use_winding) {
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
copy_v2_v2(tf_uv[i], luv->uv);
}
}
if (use_winding) {
winding[a] = cross_poly_v2(tf_uv, efa->len) > 0;
}
}
}
/* sort individual uvs for each vert */
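/* walk each vert's linked list of UVs and merge entries whose UVs differ by less
 * than 'limit' (and, optionally, that share face winding); the first element of
 * each merged run is flagged 'separate' */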
BM_ITER_MESH_INDEX (ev, &iter, bm, BM_VERTS_OF_MESH, a) {
UvMapVert *newvlist = NULL, *vlist = vmap->vert[a];
UvMapVert *iterv, *v, *lastv, *next;
float *uv, *uv2, uvdiff[2];
while (vlist) {
v = vlist;
vlist = vlist->next;
v->next = newvlist;
newvlist = v;
efa = BM_face_at_index(bm, v->f);
/* tf = CustomData_bmesh_get(&bm->pdata, efa->head.data, CD_MTEXPOLY); */ /* UNUSED */
l = BM_iter_at_index(bm, BM_LOOPS_OF_FACE, efa, v->tfindex);
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
uv = luv->uv;
lastv = NULL;
iterv = vlist;
while (iterv) {
next = iterv->next;
efa = BM_face_at_index(bm, iterv->f);
/* tf = CustomData_bmesh_get(&bm->pdata, efa->head.data, CD_MTEXPOLY); */ /* UNUSED */
l = BM_iter_at_index(bm, BM_LOOPS_OF_FACE, efa, iterv->tfindex);
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
uv2 = luv->uv;
sub_v2_v2v2(uvdiff, uv2, uv);
if (fabsf(uvdiff[0]) < limit[0] && fabsf(uvdiff[1]) < limit[1] &&
(!use_winding || winding[iterv->f] == winding[v->f]))
{
if (lastv) lastv->next = next;
else vlist = next;
iterv->next = newvlist;
newvlist = iterv;
}
else {
lastv = iterv;
}
iterv = next;
}
newvlist->separate = 1;
}
vmap->vert[a] = newvlist;
}
if (use_winding) {
MEM_freeN(winding);
}
BLI_buffer_free(&tf_uv_buf);
return vmap;
}
UvMapVert *BM_uv_vert_map_at_index(UvVertMap *vmap, unsigned int v)
{
return vmap->vert[v];
}
/* A specialized vert map used by stitch operator */
UvElementMap *BM_uv_element_map_create(
BMesh *bm,
const bool selected, const bool use_winding, const bool do_islands)
{
BMVert *ev;
BMFace *efa;
BMLoop *l;
BMIter iter, liter;
/* vars from original func */
UvElementMap *element_map;
UvElement *buf;
bool *winding;
BLI_buffer_declare_static(vec2f, tf_uv_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
MLoopUV *luv;
int totverts, totfaces, i, totuv, j;
const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
BM_mesh_elem_index_ensure(bm, BM_VERT | BM_FACE);
totfaces = bm->totface;
totverts = bm->totvert;
totuv = 0;
/* generate UvElement array */
BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
if (!selected || BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
totuv += efa->len;
}
}
if (totuv == 0) {
return NULL;
}
element_map = (UvElementMap *)MEM_callocN(sizeof(*element_map), "UvElementMap");
element_map->totalUVs = totuv;
element_map->vert = (UvElement **)MEM_callocN(sizeof(*element_map->vert) * totverts, "UvElementVerts");
buf = element_map->buf = (UvElement *)MEM_callocN(sizeof(*element_map->buf) * totuv, "UvElement");
if (use_winding) {
winding = MEM_mallocN(sizeof(*winding) * totfaces, "winding");
}
BM_ITER_MESH_INDEX (efa, &iter, bm, BM_FACES_OF_MESH, j) {
if (use_winding) {
winding[j] = false;
}
if (!selected || BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
float (*tf_uv)[2];
if (use_winding) {
tf_uv = (float (*)[2])BLI_buffer_reinit_data(&tf_uv_buf, vec2f, efa->len);
}
BM_ITER_ELEM_INDEX (l, &liter, efa, BM_LOOPS_OF_FACE, i) {
buf->l = l;
buf->separate = 0;
buf->island = INVALID_ISLAND;
buf->tfindex = i;
buf->next = element_map->vert[BM_elem_index_get(l->v)];
element_map->vert[BM_elem_index_get(l->v)] = buf;
if (use_winding) {
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
copy_v2_v2(tf_uv[i], luv->uv);
}
buf++;
}
if (use_winding) {
winding[j] = cross_poly_v2(tf_uv, efa->len) > 0;
}
}
}
/* sort individual uvs for each vert */
BM_ITER_MESH_INDEX (ev, &iter, bm, BM_VERTS_OF_MESH, i) {
UvElement *newvlist = NULL, *vlist = element_map->vert[i];
UvElement *iterv, *v, *lastv, *next;
float *uv, *uv2, uvdiff[2];
while (vlist) {
v = vlist;
vlist = vlist->next;
v->next = newvlist;
newvlist = v;
l = v->l;
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
uv = luv->uv;
lastv = NULL;
iterv = vlist;
while (iterv) {
next = iterv->next;
l = iterv->l;
luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
uv2 = luv->uv;
sub_v2_v2v2(uvdiff, uv2, uv);
if (fabsf(uvdiff[0]) < STD_UV_CONNECT_LIMIT && fabsf(uvdiff[1]) < STD_UV_CONNECT_LIMIT &&
(!use_winding || winding[BM_elem_index_get(iterv->l->f)] == winding[BM_elem_index_get(v->l->f)]))
{
if (lastv) lastv->next = next;
else vlist = next;
iterv->next = newvlist;
newvlist = iterv;
}
else {
lastv = iterv;
}
iterv = next;
}
newvlist->separate = 1;
}
element_map->vert[i] = newvlist;
}
if (use_winding) {
MEM_freeN(winding);
}
if (do_islands) {
unsigned int *map;
BMFace **stack;
int stacksize = 0;
UvElement *islandbuf;
/* island number for faces */
int *island_number = NULL;
int nislands = 0, islandbufsize = 0;
/* map holds the map from current vmap->buf to the new, sorted map */
map = MEM_mallocN(sizeof(*map) * totuv, "uvelement_remap");
stack = MEM_mallocN(sizeof(*stack) * bm->totface, "uv_island_face_stack");
islandbuf = MEM_callocN(sizeof(*islandbuf) * totuv, "uvelement_island_buffer");
island_number = MEM_mallocN(sizeof(*island_number) * totfaces, "uv_island_number_face");
copy_vn_i(island_number, totfaces, INVALID_ISLAND);
/* at this point, every UvElement in vert points to a UvElement sharing the same vertex. Now sort the UVs into islands. */
for (i = 0; i < totuv; i++) {
if (element_map->buf[i].island == INVALID_ISLAND) {
element_map->buf[i].island = nislands;
stack[0] = element_map->buf[i].l->f;
island_number[BM_elem_index_get(stack[0])] = nislands;
stacksize = 1;
while (stacksize > 0) {
efa = stack[--stacksize];
BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
UvElement *element, *initelement = element_map->vert[BM_elem_index_get(l->v)];
for (element = initelement; element; element = element->next) {
if (element->separate)
initelement = element;
if (element->l->f == efa) {
/* found the uv corresponding to our face and vertex. Now fill it to the buffer */
element->island = nislands;
map[element - element_map->buf] = islandbufsize;
islandbuf[islandbufsize].l = element->l;
islandbuf[islandbufsize].separate = element->separate;
islandbuf[islandbufsize].tfindex = element->tfindex;
islandbuf[islandbufsize].island = nislands;
islandbufsize++;
for (element = initelement; element; element = element->next) {
if (element->separate && element != initelement)
break;
if (island_number[BM_elem_index_get(element->l->f)] == INVALID_ISLAND) {
stack[stacksize++] = element->l->f;
island_number[BM_elem_index_get(element->l->f)] = nislands;
}
}
break;
}
}
}
}
nislands++;
}
}
MEM_freeN(island_number);
/* remap */
for (i = 0; i < bm->totvert; i++) {
/* important since we may do selection only. Some of these may be NULL */
if (element_map->vert[i])
element_map->vert[i] = &islandbuf[map[element_map->vert[i] - element_map->buf]];
}
element_map->islandIndices = MEM_callocN(sizeof(*element_map->islandIndices) * nislands, "UvElementMap_island_indices");
j = 0;
for (i = 0; i < totuv; i++) {
UvElement *element = element_map->buf[i].next;
if (element == NULL)
islandbuf[map[i]].next = NULL;
else
islandbuf[map[i]].next = &islandbuf[map[element - element_map->buf]];
if (islandbuf[i].island != j) {
j++;
element_map->islandIndices[j] = i;
}
}
MEM_freeN(element_map->buf);
element_map->buf = islandbuf;
element_map->totalIslands = nislands;
MEM_freeN(stack);
MEM_freeN(map);
}
BLI_buffer_free(&tf_uv_buf);
return element_map;
}
void BM_uv_vert_map_free(UvVertMap *vmap)
{
if (vmap) {
if (vmap->vert) MEM_freeN(vmap->vert);
if (vmap->buf) MEM_freeN(vmap->buf);
MEM_freeN(vmap);
}
}
void BM_uv_element_map_free(UvElementMap *element_map)
{
if (element_map) {
if (element_map->vert) MEM_freeN(element_map->vert);
if (element_map->buf) MEM_freeN(element_map->buf);
if (element_map->islandIndices) MEM_freeN(element_map->islandIndices);
MEM_freeN(element_map);
}
}
UvElement *BM_uv_element_get(UvElementMap *map, BMFace *efa, BMLoop *l)
{
UvElement *element;
element = map->vert[BM_elem_index_get(l->v)];
for (; element; element = element->next)
if (element->l->f == efa)
return element;
return NULL;
}
/* if last_sel, use em->act_face, otherwise get the last selected face in the editselections;
 * at the moment, last_sel is mainly useful for making sure the space image doesn't flicker */
MTexPoly *EDBM_mtexpoly_active_get(BMEditMesh *em, BMFace **r_act_efa, const bool sloppy, const bool selected)
{
BMFace *efa = NULL;
if (!EDBM_mtexpoly_check(em))
return NULL;
efa = BM_mesh_active_face_get(em->bm, sloppy, selected);
if (efa) {
if (r_act_efa) *r_act_efa = efa;
return CustomData_bmesh_get(&em->bm->pdata, efa->head.data, CD_MTEXPOLY);
}
if (r_act_efa) *r_act_efa = NULL;
return NULL;
}
/* can we edit UVs for this mesh? */
bool EDBM_mtexpoly_check(BMEditMesh *em)
{
/* some of these checks could be a touch overkill */
return em && em->bm->totface && CustomData_has_layer(&em->bm->pdata, CD_MTEXPOLY) &&
CustomData_has_layer(&em->bm->ldata, CD_MLOOPUV);
}
bool EDBM_vert_color_check(BMEditMesh *em)
{
/* some of these checks could be a touch overkill */
return em && em->bm->totface && CustomData_has_layer(&em->bm->ldata, CD_MLOOPCOL);
}
static BMVert *cache_mirr_intptr_as_bmvert(intptr_t *index_lookup, int index)
{
intptr_t eve_i = index_lookup[index];
return (eve_i == -1) ? NULL : (BMVert *)eve_i;
}
/**
* Mirror editing API, usage:
*
* \code{.c}
* EDBM_verts_mirror_cache_begin(em, ...);
*
* BM_ITER_MESH (v, &iter, em->bm, BM_VERTS_OF_MESH) {
* v_mirror = EDBM_verts_mirror_get(em, v);
* e_mirror = EDBM_verts_mirror_get_edge(em, e);
* f_mirror = EDBM_verts_mirror_get_face(em, f);
* }
*
* EDBM_verts_mirror_cache_end(em);
* \endcode
*/
/* BM_SEARCH_MAXDIST is too big, copied from 2.6x MOC_THRESH, should become a
* preference */
#define BM_SEARCH_MAXDIST_MIRR 0.00002f
#define BM_CD_LAYER_ID "__mirror_index"
/**
* \param em Editmesh.
* \param use_self Allow a vertex to point to itself (middle verts).
* \param use_select Restrict to selected verts.
* \param use_topology Use topology mirror.
* \param maxdist Distance for close point test.
* \param r_index Optional array to write into, as an alternative to a customdata layer (length of total verts).
*/
void EDBM_verts_mirror_cache_begin_ex(BMEditMesh *em, const int axis, const bool use_self, const bool use_select,
/* extra args */
const bool use_topology, float maxdist, int *r_index)
{
Mesh *me = (Mesh *)em->ob->data;
BMesh *bm = em->bm;
BMIter iter;
BMVert *v;
int cd_vmirr_offset;
int i;
const float maxdist_sq = SQUARE(maxdist);
/* one or the other is used depending if topo is enabled */
KDTree *tree = NULL;
MirrTopoStore_t mesh_topo_store = {NULL, -1, -1, -1};
BM_mesh_elem_table_ensure(bm, BM_VERT);
if (r_index == NULL) {
const char *layer_id = BM_CD_LAYER_ID;
em->mirror_cdlayer = CustomData_get_named_layer_index(&bm->vdata, CD_PROP_INT, layer_id);
if (em->mirror_cdlayer == -1) {
BM_data_layer_add_named(bm, &bm->vdata, CD_PROP_INT, layer_id);
em->mirror_cdlayer = CustomData_get_named_layer_index(&bm->vdata, CD_PROP_INT, layer_id);
}
cd_vmirr_offset = CustomData_get_n_offset(&bm->vdata, CD_PROP_INT,
em->mirror_cdlayer - CustomData_get_layer_index(&bm->vdata, CD_PROP_INT));
bm->vdata.layers[em->mirror_cdlayer].flag |= CD_FLAG_TEMPORARY;
}
BM_mesh_elem_index_ensure(bm, BM_VERT);
if (use_topology) {
ED_mesh_mirrtopo_init(me, NULL, -1, &mesh_topo_store, true);
}
else {
tree = BLI_kdtree_new(bm->totvert);
BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
BLI_kdtree_insert(tree, i, v->co);
}
BLI_kdtree_balance(tree);
}
#define VERT_INTPTR(_v, _i) r_index ? &r_index[_i] : BM_ELEM_CD_GET_VOID_P(_v, cd_vmirr_offset);
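/* VERT_INTPTR yields a pointer to the mirror-index slot for a vert: either into the
 * caller-provided r_index array or into the custom-data layer added above */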
BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
BLI_assert(BM_elem_index_get(v) == i);
/* temporary for testing, check for selection */
if (use_select && !BM_elem_flag_test(v, BM_ELEM_SELECT)) {
/* do nothing */
}
else {
BMVert *v_mirr;
int *idx = VERT_INTPTR(v, i);
if (use_topology) {
v_mirr = cache_mirr_intptr_as_bmvert(mesh_topo_store.index_lookup, i);
}
else {
int i_mirr;
float co[3];
copy_v3_v3(co, v->co);
co[axis] *= -1.0f;
v_mirr = NULL;
i_mirr = BLI_kdtree_find_nearest(tree, co, NULL);
if (i_mirr != -1) {
BMVert *v_test = BM_vert_at_index(bm, i_mirr);
if (len_squared_v3v3(co, v_test->co) < maxdist_sq) {
v_mirr = v_test;
}
}
}
if (v_mirr && (use_self || (v_mirr != v))) {
const int i_mirr = BM_elem_index_get(v_mirr);
*idx = i_mirr;
idx = VERT_INTPTR(v_mirr, i_mirr);
*idx = i;
}
else {
*idx = -1;
}
}
}
#undef VERT_INTPTR
if (use_topology) {
ED_mesh_mirrtopo_free(&mesh_topo_store);
}
else {
BLI_kdtree_free(tree);
}
}
void EDBM_verts_mirror_cache_begin(BMEditMesh *em, const int axis,
const bool use_self, const bool use_select,
const bool use_topology)
{
EDBM_verts_mirror_cache_begin_ex(em, axis,
use_self, use_select,
/* extra args */
use_topology, BM_SEARCH_MAXDIST_MIRR, NULL);
}
BMVert *EDBM_verts_mirror_get(BMEditMesh *em, BMVert *v)
{
const int *mirr = CustomData_bmesh_get_layer_n(&em->bm->vdata, v->head.data, em->mirror_cdlayer);
BLI_assert(em->mirror_cdlayer != -1); /* invalid use */
if (mirr && *mirr >= 0 && *mirr < em->bm->totvert) {
if (!em->bm->vtable) {
printf("err: should only be called between "
"EDBM_verts_mirror_cache_begin and EDBM_verts_mirror_cache_end");
return NULL;
}
return em->bm->vtable[*mirr];
}
return NULL;
}
BMEdge *EDBM_verts_mirror_get_edge(BMEditMesh *em, BMEdge *e)
{
BMVert *v1_mirr = EDBM_verts_mirror_get(em, e->v1);
if (v1_mirr) {
BMVert *v2_mirr = EDBM_verts_mirror_get(em, e->v2);
if (v2_mirr) {
return BM_edge_exists(v1_mirr, v2_mirr);
}
}
return NULL;
}
BMFace *EDBM_verts_mirror_get_face(BMEditMesh *em, BMFace *f)
{
BMVert **v_mirr_arr = BLI_array_alloca(v_mirr_arr, f->len);
BMLoop *l_iter, *l_first;
unsigned int i = 0;
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
do {
if ((v_mirr_arr[i++] = EDBM_verts_mirror_get(em, l_iter->v)) == NULL) {
return NULL;
}
} while ((l_iter = l_iter->next) != l_first);
return BM_face_exists(v_mirr_arr, f->len);
}
void EDBM_verts_mirror_cache_clear(BMEditMesh *em, BMVert *v)
{
int *mirr = CustomData_bmesh_get_layer_n(&em->bm->vdata, v->head.data, em->mirror_cdlayer);
BLI_assert(em->mirror_cdlayer != -1); /* invalid use */
if (mirr) {
*mirr = -1;
}
}
void EDBM_verts_mirror_cache_end(BMEditMesh *em)
{
em->mirror_cdlayer = -1;
}
void EDBM_verts_mirror_apply(BMEditMesh *em, const int sel_from, const int sel_to)
{
BMIter iter;
BMVert *v;
BLI_assert((em->bm->vtable != NULL) && ((em->bm->elem_table_dirty & BM_VERT) == 0));
BM_ITER_MESH (v, &iter, em->bm, BM_VERTS_OF_MESH) {
if (BM_elem_flag_test(v, BM_ELEM_SELECT) == sel_from) {
BMVert *mirr = EDBM_verts_mirror_get(em, v);
if (mirr) {
if (BM_elem_flag_test(mirr, BM_ELEM_SELECT) == sel_to) {
copy_v3_v3(mirr->co, v->co);
mirr->co[0] *= -1.0f;
}
}
}
}
}
/* swap is 0 or 1; if 1, it hides the unselected elements instead */
void EDBM_mesh_hide(BMEditMesh *em, bool swap)
{
BMIter iter;
BMElem *ele;
int itermode;
char hflag_swap = swap ? BM_ELEM_SELECT : 0;
if (em == NULL) return;
if (em->selectmode & SCE_SELECT_VERTEX)
itermode = BM_VERTS_OF_MESH;
else if (em->selectmode & SCE_SELECT_EDGE)
itermode = BM_EDGES_OF_MESH;
else
itermode = BM_FACES_OF_MESH;
BM_ITER_MESH (ele, &iter, em->bm, itermode) {
if (BM_elem_flag_test(ele, BM_ELEM_SELECT) ^ hflag_swap)
BM_elem_hide_set(em->bm, ele, true);
}
EDBM_selectmode_flush(em);
/* original hide flushing comment (OUTDATED):
 * hide happens on the least dominant select mode, and flushes up, not down! (helps prevent errors in subsurf) */
/* - vertex hidden, always means edge is hidden too
* - edge hidden, always means face is hidden too
* - face hidden, only set face hide
* - then only flush back down what's absolute hidden
*/
}
void EDBM_mesh_reveal(BMEditMesh *em)
{
const char iter_types[3] = {BM_VERTS_OF_MESH,
BM_EDGES_OF_MESH,
BM_FACES_OF_MESH};
const bool sels[3] = {
(em->selectmode & SCE_SELECT_VERTEX) != 0,
(em->selectmode & SCE_SELECT_EDGE) != 0,
(em->selectmode & SCE_SELECT_FACE) != 0,
};
int i;
/* Use tag flag to remember what was hidden before all is revealed.
* BM_ELEM_HIDDEN --> BM_ELEM_TAG */
#pragma omp parallel for schedule(static) if (em->bm->totvert + em->bm->totedge + em->bm->totface >= BM_OMP_LIMIT)
for (i = 0; i < 3; i++) {
BMIter iter;
BMElem *ele;
BM_ITER_MESH (ele, &iter, em->bm, iter_types[i]) {
BM_elem_flag_set(ele, BM_ELEM_TAG, BM_elem_flag_test(ele, BM_ELEM_HIDDEN));
}
}
/* Reveal everything */
EDBM_flag_disable_all(em, BM_ELEM_HIDDEN);
/* Select relevant just-revealed elements */
for (i = 0; i < 3; i++) {
BMIter iter;
BMElem *ele;
if (!sels[i]) {
continue;
}
BM_ITER_MESH (ele, &iter, em->bm, iter_types[i]) {
if (BM_elem_flag_test(ele, BM_ELEM_TAG)) {
BM_elem_select_set(em->bm, ele, true);
}
}
}
EDBM_selectmode_flush(em);
/* hidden faces can have invalid normals */
EDBM_mesh_normals_update(em);
}
/* so many tools call these that we'd better make it a generic function. */
void EDBM_update_generic(BMEditMesh *em, const bool do_tessface, const bool is_destructive)
{
Object *ob = em->ob;
/* order of calling isn't important */
DAG_id_tag_update(ob->data, OB_RECALC_DATA);
WM_main_add_notifier(NC_GEOM | ND_DATA, ob->data);
if (do_tessface) {
BKE_editmesh_tessface_calc(em);
}
if (is_destructive) {
/* TODO. we may be able to remove this now! - Campbell */
// BM_mesh_elem_table_free(em->bm, BM_ALL_NOLOOP);
}
else {
/* in debug mode double check we didn't need to recalculate */
BLI_assert(BM_mesh_elem_table_check(em->bm) == true);
}
/* don't keep stale derivedMesh data around, see: [#38872] */
BKE_editmesh_free_derivedmesh(em);
#ifdef DEBUG
{
BMEditSelection *ese;
for (ese = em->bm->selected.first; ese; ese = ese->next) {
BLI_assert(BM_elem_flag_test(ese->ele, BM_ELEM_SELECT));
}
}
#endif
}
/* poll call for mesh operators requiring a view3d context */
int EDBM_view3d_poll(bContext *C)
{
if (ED_operator_editmesh(C) && ED_operator_view3d_active(C))
return 1;
return 0;
}
BMElem *EDBM_elem_from_selectmode(BMEditMesh *em, BMVert *eve, BMEdge *eed, BMFace *efa)
{
BMElem *ele = NULL;
if ((em->selectmode & SCE_SELECT_VERTEX) && eve) {
ele = (BMElem *)eve;
}
else if ((em->selectmode & SCE_SELECT_EDGE) && eed) {
ele = (BMElem *)eed;
}
else if ((em->selectmode & SCE_SELECT_FACE) && efa) {
ele = (BMElem *)efa;
}
return ele;
}
/**
* Used when we want to store a single index for any vert/edge/face.
*
* Intended for use with operators.
*/
int EDBM_elem_to_index_any(BMEditMesh *em, BMElem *ele)
{
BMesh *bm = em->bm;
int index = BM_elem_index_get(ele);
if (ele->head.htype == BM_VERT) {
BLI_assert(!(bm->elem_index_dirty & BM_VERT));
}
else if (ele->head.htype == BM_EDGE) {
BLI_assert(!(bm->elem_index_dirty & BM_EDGE));
index += bm->totvert;
}
else if (ele->head.htype == BM_FACE) {
BLI_assert(!(bm->elem_index_dirty & BM_FACE));
index += bm->totvert + bm->totedge;
}
else {
BLI_assert(0);
}
return index;
}
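/* The packed index space (illustrative): verts occupy [0, totvert), edges
 * [totvert, totvert + totedge), faces [totvert + totedge, totvert + totedge + totface),
 * so EDBM_elem_from_index_any(em, EDBM_elem_to_index_any(em, ele)) == ele. */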
BMElem *EDBM_elem_from_index_any(BMEditMesh *em, int index)
{
BMesh *bm = em->bm;
if (index < bm->totvert) {
return (BMElem *)BM_vert_at_index_find_or_table(bm, index);
}
index -= bm->totvert;
if (index < bm->totedge) {
return (BMElem *)BM_edge_at_index_find_or_table(bm, index);
}
index -= bm->totedge;
if (index < bm->totface) {
return (BMElem *)BM_face_at_index_find_or_table(bm, index);
}
return NULL;
}
/* -------------------------------------------------------------------- */
/* BMBVH functions */
// XXX
#if 0 //BMESH_TODO: not implemented yet
int BMBVH_VertVisible(BMBVHTree *tree, BMEdge *e, RegionView3D *r3d)
{
}
#endif
static BMFace *edge_ray_cast(struct BMBVHTree *tree, const float co[3], const float dir[3], float *r_hitout, BMEdge *e)
{
BMFace *f = BKE_bmbvh_ray_cast(tree, co, dir, 0.0f, NULL, r_hitout, NULL);
if (f && BM_edge_in_face(e, f))
return NULL;
return f;
}
static void scale_point(float c1[3], const float p[3], const float s)
{
sub_v3_v3(c1, p);
mul_v3_fl(c1, s);
add_v3_v3(c1, p);
}
bool BMBVH_EdgeVisible(struct BMBVHTree *tree, BMEdge *e, ARegion *ar, View3D *v3d, Object *obedit)
{
BMFace *f;
float co1[3], co2[3], co3[3], dir1[3], dir2[3], dir3[3];
float origin[3], invmat[4][4];
float epsilon = 0.01f;
float end[3];
const float mval_f[2] = {ar->winx / 2.0f,
ar->winy / 2.0f};
ED_view3d_win_to_segment(ar, v3d, mval_f, origin, end, false);
invert_m4_m4(invmat, obedit->obmat);
mul_m4_v3(invmat, origin);
copy_v3_v3(co1, e->v1->co);
mid_v3_v3v3(co2, e->v1->co, e->v2->co);
copy_v3_v3(co3, e->v2->co);
scale_point(co1, co2, 0.99);
scale_point(co3, co2, 0.99);
/* ok, the idea is to generate rays going from the camera origin to the
 * three points on the edge (v1, mid, v2) */
sub_v3_v3v3(dir1, origin, co1);
sub_v3_v3v3(dir2, origin, co2);
sub_v3_v3v3(dir3, origin, co3);
normalize_v3_length(dir1, epsilon);
normalize_v3_length(dir2, epsilon);
normalize_v3_length(dir3, epsilon);
/* offset coordinates slightly along view vectors, to avoid
* hitting the faces that own the edge. */
add_v3_v3v3(co1, co1, dir1);
add_v3_v3v3(co2, co2, dir2);
add_v3_v3v3(co3, co3, dir3);
normalize_v3(dir1);
normalize_v3(dir2);
normalize_v3(dir3);
/* do three samplings: left, middle, right */
f = edge_ray_cast(tree, co1, dir1, NULL, e);
if (f && !edge_ray_cast(tree, co2, dir2, NULL, e))
return true;
else if (f && !edge_ray_cast(tree, co3, dir3, NULL, e))
return true;
else if (!f)
return true;
return false;
}
|
shader.h | #ifndef SHADER_H
#define SHADER_H
// ===============================
// AUTHOR : Angel Ortiz (angelo12 AT vt DOT edu)
// CREATE DATE : 2018-07-12
// PURPOSE : Emulate modern programmable vertex and fragment shaders. Allow texture
// reading and full Physically based rendering models.
// ===============================
// SPECIAL NOTES: I kept the older shaders that I wrote while working towards
// building the final PBR model because I thought it would be nice to see the progression.
// Although using pure interface classes would seem to incur a performance
// penalty through pointer chasing, I did not measure one through profiling.
// ===============================
//Headers
#include "vector3D.h"
#include "matrix.h"
#include "texture.h"
#include <math.h>
//Shader Interface for a class that emulates modern GPU fragment and vertex shaders
struct IShader {
virtual ~IShader() {};
virtual Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals,const Vector3f &tangent,
int index, const Vector3f &light = Vector3f{1,1,1}) = 0;
virtual Vector3f fragment(float u, float v) = 0;
};
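//Illustrative usage (a sketch; the rasterizer loop itself lives elsewhere): call
//shader.vertex() once per triangle corner (index 0..2) to get clip-space positions,
//rasterize, then call shader.fragment(u, v) with each covered pixel's barycentric
//coordinates to shade it.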
//Simplest shader. Calculates light intensity per triangle.
struct FlatShader : public IShader {
Matrix4 MVP, MV;
float varIntensity;
Vector3f rgb{255,255,255};
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals,const Vector3f &tangent,
int index, const Vector3f &light) override
{
varIntensity = std::max(0.0f,normal.dotProduct(light));
return MVP.matMultVec(vertex); //Transforms verts into projected space
}
Vector3f fragment(float u, float v) override{
return rgb*varIntensity;
}
};
//More complex shader that calculates a per-vertex intensity and interpolates
//it across the fragments of the triangle
struct GouraudShader : public IShader {
//Per object data
Matrix4 MVP, MV, V, N;
Vector3f lightColor1{1,1,1}, lightColor2{0,0,1}, lightColor3{1,1,1};
float ambientStrength = 0.05, diffStrength = 0, specularStrength = 0.5, spec = 0;
Vector3f rgb{255,255,255};
//Per vertex data
Vector3f varying_diffuse, varying_specular, reflectDir, viewDir, lightDir, correctNormal;
//Per pixel data
Vector3f ambient, diffuse, specular;
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals,const Vector3f &tangent,
int index, const Vector3f &light) override
{
//Vertex attributes
correctNormal = N.matMultDir(normal).normalized();
lightDir = V.matMultDir(light).normalized();
reflectDir = Vector3f::reflect(-lightDir, correctNormal);
viewDir = MV.matMultVec(vertex).normalized();
//Values to be interpolated
varying_specular.data[index] = std::pow( std::max( -viewDir.dotProduct(reflectDir), 0.0f), 32.0f);
varying_diffuse.data[index] = std::max(0.0f, (correctNormal).dotProduct(-lightDir));
return MVP.matMultVec(vertex);
}
Vector3f fragment(float u, float v) override{
//Interpolating
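//barycentric interpolation: f(u,v) = f0 + u*(f1 - f0) + v*(f2 - f0)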
diffStrength = varying_diffuse.x + u*(varying_diffuse.y - varying_diffuse.x) + v*(varying_diffuse.z - varying_diffuse.x);
spec = varying_specular.x + u*(varying_specular.y - varying_specular.x) + v*(varying_specular.z - varying_specular.x);
//Phong reflection model
ambient = lightColor1 * ambientStrength;
diffuse = lightColor2 * diffStrength;
specular = lightColor3 * (specularStrength * spec);
return (ambient + diffuse + specular) * rgb;
}
};
//Even more complex shader that interpolates normals and calculates intensities
//per fragment instead of per vertex.
struct PhongShader : public IShader {
//Per object data
Matrix4 MVP, MV, V, N;
float ambientStrength = 0.05, diffStrength = 0, specularStrength = 0.9, spec = 0;
Vector3f lightColor{0,0.1,1},lightColorSpec{1,1,1};
Vector3f rgb{255,255,255};
//Per vertex data
Vector3f normals[3], viewDir[3];
Vector3f varying_diffuse, varying_specular, reflectDir, lightDir;
//Per pixel data
Vector3f ambient, diffuse, specular, interpNormal, interpViewDir;
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals,const Vector3f &tangent,
int index, const Vector3f &light) override
{
//Vertex attributes
normals[index] = N.matMultDir(normal).normalized();
viewDir[index] = MV.matMultVec(vertex).normalized();
lightDir = V.matMultDir(light).normalized();
return MVP.matMultVec(vertex);
}
Vector3f fragment(float u, float v) override{
//Interpolated stuff
interpNormal = normals[0] + (normals[1] - normals[0])* u + (normals[2] - normals[0]) * v;
interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
//Ambient
ambient = lightColor * ambientStrength;
//Diffuse
diffStrength = std::max(0.0f, (interpNormal.normalized()).dotProduct(lightDir));
diffuse = lightColor * diffStrength;
//Specular
reflectDir = Vector3f::reflect(-lightDir, interpNormal);
spec = std::pow( std::max( (-interpViewDir.normalized()).dotProduct(reflectDir), 0.0f), 50.0f);
specular = lightColorSpec * (specularStrength * spec);
return (ambient + diffuse + specular) * rgb;
}
};
//Optimized version of the Phong shader that uses a halfway vector instead of
//individual reflection vectors
struct BlinnPhongShader : public IShader {
//Per object data
Texture *albedoT;
Matrix4 MVP, MV, V, N;
float ambientStrength = 0.05, diffStrength=1 , specularStrength= 0.5;
Vector3f lightColor{1,1,1};
//Per vertex data
Vector3f normals[3], viewDir[3], UV[3];
float diff, spec, shininess = 128;
//Per fragment data
Vector3f ambient, diffuse, specular, interpNormal, interpViewDir, interpUV;
Vector3f halfwayDir, lightDir;
Vector3f interpCol, white{255,255,255};
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals,const Vector3f &tangent,
int index, const Vector3f &light) override{
normals[index] = N.matMultDir(normal).normalized();
UV[index] = textureVals;
viewDir[index] = MV.matMultVec(vertex).normalized();
lightDir = V.matMultDir(light).normalized();
return MVP.matMultVec(vertex);
}
Vector3f fragment(float u, float v) override{
//Interpolated stuff
interpNormal = (normals[0] + (normals[1] - normals[0])* u + (normals[2] - normals[0]) * v).normalized();
interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
interpUV = UV[0] + (UV[1] - UV[0])* u + (UV[2] - UV[0]) * v;
//Albedo
interpCol = albedoT->getPixelVal(interpUV.x, interpUV.y);
//Ambient
ambient = lightColor * ambientStrength;
//Diffuse
diff = std::max(0.0f, interpNormal.dotProduct(lightDir));
diffuse = lightColor * diff * diffStrength;
//Specular
halfwayDir = (lightDir - interpViewDir).normalized();
spec = std::pow(std::max(0.0f, interpNormal.dotProduct(halfwayDir)), shininess);
specular = lightColor * spec * specularStrength;
return (ambient + diffuse) * interpCol + specular * white;
}
};
// Shader that uses texture mapping extensively
struct TextureMapShader : public IShader {
//Variables set per model
Texture *albedoT, *normalT, *ambientOT;
Matrix4 MVP, MV, V, M, N;
Vector3f cameraPos;
//Light Variables
Vector3f lightColor{1,1,1}, white{1,1,1};
float ambientStrength = 0.1, diffStrength = 0.9, specularStrength = 0.8;
float diff, spec, shininess = 128;
Vector3f lightDir[3];
//Variables set per vertex
Vector3f viewDir[3], texCoords[3];
Vector3f normal_WS, tangent_WS, biTangent_WS;
Matrix4 TBN;
//Interpolated variables
Vector3f interpCoords, interpLightDir, interpNormal,
interpViewDir, interpCol, interpAO;
//Per fragment
Vector3f ambient, diffuse, specular ;
Vector3f halfwayDir;
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals, const Vector3f &tangent,
int index, const Vector3f &light) override{
//Creating TBN matrix
normal_WS = N.matMultDir(normal).normalized();
tangent_WS = N.matMultDir(tangent).normalized();
biTangent_WS = normal_WS.crossProduct(tangent_WS);
TBN = Matrix4::TBNMatrix(tangent_WS, biTangent_WS, normal_WS);
//Getting UV coordinates for use in both albedo and normal textures
texCoords[index] = textureVals;
//Passing all lighting related data to tangent space
lightDir[index] = TBN.matMultVec(light);
viewDir[index] = TBN.matMultVec(cameraPos - M.matMultVec(vertex));
return MVP.matMultVec(vertex);
}
Vector3f fragment(float u, float v) override{
//Interpolated attributes
interpCoords = texCoords[0] + (texCoords[1] - texCoords[0])* u + (texCoords[2] - texCoords[0]) * v;
interpLightDir = lightDir[0] + (lightDir[1] - lightDir[0])* u + (lightDir[2] - lightDir[0]) * v;
interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
//Reading albedo and normal data from textures
interpCol = albedoT->getPixelVal(interpCoords.x, interpCoords.y);
interpAO = ambientOT->getIntensityVal(interpCoords.x, interpCoords.y);
interpNormal = normalT->getPixelVal(interpCoords.x, interpCoords.y);
interpNormal = interpNormal.normalized();
//Ambient
ambient = lightColor * ambientStrength * interpAO;
//Diffuse
diff = std::max(0.0f, interpNormal.dotProduct(interpLightDir));
diffuse = lightColor * diff * diffStrength;
//Specular
halfwayDir = (interpLightDir + interpViewDir).normalized();
spec = std::pow(std::max(0.0f, interpNormal.dotProduct(halfwayDir)), shininess);
specular = lightColor * spec * specularStrength;
return (ambient + diffuse) * interpCol + specular * white;
}
};
// Shader that uses texture mapping and a PBR approach for shading
// Uses a Cook-Torrance BRDF for direct light sources.
struct PBRShader : public IShader {
//Variables set per model
Texture *albedoT, *normalT, *ambientOT, *roughT, *metalT;
Matrix4 MVP, MV, V, M, N;
Vector3f cameraPos;
//Light Variables
Vector3f F0{0.04, 0.04, 0.04}, F0corrected; //Default value dielectric
Vector3f *lightDirVal, *lightCol, *lightPos;
float nDotL, nDotV, ambientInt = 0.01;
int numLights;
//Variables set per vertex
Vector3f viewDir[3], texCoords[3];
Vector3f normal_WS, tangent_WS, biTangent_WS;
Matrix4 TBN;
//Interpolated variables
Vector3f interpCoords, interpNormal, interpViewDir, interpCol;
//Per fragment
Vector3f radianceOut, ambient;
float interpRough, interpAO, interpMetal;
float uTexture, vTexture, intPart;
//BRDF functions
Vector3f fresnelSchlick(float cosTheta, Vector3f &fresnel0 ){
float invCosTheta = 1.0f - cosTheta;
return fresnel0 + (Vector3f(1.0f) - fresnel0) * (invCosTheta * invCosTheta * invCosTheta * invCosTheta * invCosTheta);
}
float distributionGGX(Vector3f normal, Vector3f halfway, float roughness){
float a = roughness*roughness;
float a2 = a*a;
float NdotH = std::max(normal.dotProduct(halfway), 0.0f);
float NdotH2 = NdotH*NdotH;
float denom = (NdotH2 * (a2 - 1.0f) + 1.0f);
denom = M_1_PIf32/ (denom * denom);
return a2 * denom;
}
float GeometrySchlickGGX(float Ndot, float roughness){
float r = (roughness + 1.0f);
float k = (r*r) / 8.0f; //This remap is only valid for direct lighting; it must be changed for IBL
float denom = 1.0f / (Ndot * (1.0f- k) + k);
return Ndot * denom;
}
float GeometrySmith(float roughness, float nDL, float nDV){
return GeometrySchlickGGX(nDL, roughness) * GeometrySchlickGGX(nDV, roughness);
}
//Vertex shader
Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
const Vector3f &textureVals, const Vector3f &tangent,
int index, const Vector3f &light = Vector3f{1,1,1}) override
{
//Creating TBN matrix
normal_WS = N.matMultDir(normal).normalized();
tangent_WS = N.matMultDir(tangent).normalized();
biTangent_WS = normal_WS.crossProduct(tangent_WS);
TBN = Matrix4::TBNMatrix(tangent_WS, biTangent_WS, normal_WS);
//Getting UV coordinates for use in all textures
texCoords[index] = textureVals;
//Passing all lighting related data to tangent space
for(int lIndex = 0; lIndex < numLights; ++lIndex){
int indc2 = (lIndex*3) + index;
lightDirVal[indc2] = TBN.matMultDir(lightPos[lIndex]);
}
viewDir[index] = TBN.matMultDir(cameraPos - M.matMultVec(vertex));
return MVP.matMultVec(vertex);
}
//Fragment shader
Vector3f fragment(float u, float v) override{
//Interpolated attributes
interpCoords = texCoords[0] + (texCoords[1] - texCoords[0])* u + (texCoords[2] - texCoords[0]) * v;
interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
//Correcting UV's for tiling
uTexture = std::modf(interpCoords.x, &intPart);
vTexture = std::modf(interpCoords.y, &intPart);
//Reading data from textures for use in lighting calculations
interpCol = albedoT->getPixelVal(uTexture, vTexture);
interpAO = ambientOT->getIntensityVal(uTexture, vTexture);
interpRough = roughT->getIntensityVal(uTexture, vTexture);
interpMetal = metalT->getIntensityVal(uTexture, vTexture);
interpNormal = normalT->getPixelVal(uTexture, vTexture);
interpNormal = interpNormal.normalized();
interpViewDir = interpViewDir.normalized();
//Varying f0 based on metallicness of surface
float invMetal = (1.0f-interpMetal);
F0corrected = (F0 * invMetal) + (interpCol * interpMetal);
nDotV = std::max(interpNormal.dotProduct(interpViewDir), 0.0f);
//Setting up Direct Lighting variables
const int maxLights = numLights;
//Fresnel, normal distribution function and geometry occlusion
Vector3f F[maxLights];
float NDF[maxLights];
float G[maxLights];
//Storing in array for vectorizing
Vector3f radianceLights[maxLights];
Vector3f interpLightDir[maxLights];
Vector3f halfwayDir[maxLights];
float nDotL[maxLights];
Vector3f numerator[maxLights];
float invDenominator[maxLights];
Vector3f specular[maxLights];
Vector3f kD[maxLights];
//Interpolating each light direction for every light
int val;
for(int i = 0; i < maxLights; ++i ){
val = i*3;
interpLightDir[i] = (lightDirVal[val] + (lightDirVal[val + 1] - lightDirVal[val])* u + (lightDirVal[val + 2] - lightDirVal[val]) * v).normalized();
}
//Per-light illumination calculations that can be SIMDified
//Currently uses the widest SIMD width to perform all light iterations in one
//trip, which I believe leaves some lanes idle when numLights is small
#pragma omp simd
for(int light = 0; light < maxLights; ++light ){
halfwayDir[light] = (interpLightDir[light] + interpViewDir);
halfwayDir[light] = halfwayDir[light].normalized();
nDotL[light] = std::fmax(interpNormal.dotProduct(interpLightDir[light]), 0.0f);
//No problem vectorizing these functions because they are inlined by the compiler
//And also only consist of math operations to the vectors
F[light] = fresnelSchlick(std::fmax(halfwayDir[light].dotProduct(interpViewDir), 0.0f), F0corrected);
NDF[light] = distributionGGX(interpNormal, halfwayDir[light], interpRough);
G[light] = GeometrySmith(interpRough, nDotL[light] , nDotV);
numerator[light] = F[light] * G[light]*NDF[light];
invDenominator[light] = 1.0f / std::fmax(4.0f * (nDotL[light] * nDotV), 0.001f);
specular[light] = numerator[light] * invDenominator[light];
//kD is 1 - kS (kS being the Fresnel term F), scaled by inverse metalness since metals have no diffuse
kD[light] = (Vector3f(1.0f) - F[light])*invMetal;
//The rendering equation result for a given light
radianceLights[light] = (kD[light] * (interpCol * (M_1_PIf32)) + specular[light] ) * nDotL[light] * lightCol[light];
}
//Summing up all radiance values since SIMD won't work if I do this within the
//previous loop
radianceOut.zero();
for(int i = 0; i < maxLights; ++i) {
radianceOut += radianceLights[i];
}
//Simplistic ambient term
ambient = interpCol * (ambientInt * interpAO);
return ambient + radianceOut;
}
};
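//Hedged sanity check (added illustration; the helper is hypothetical): at
//normal incidence (cosTheta == 1) fresnelSchlick above must return F0 exactly,
//since the (1-cosTheta)^5 term vanishes; toward grazing angles it tends to 1.
inline bool fresnelAtNormalIncidence(PBRShader &shader)
{
    Vector3f F0{0.04f, 0.04f, 0.04f}; //dielectric base reflectivity
    Vector3f F = shader.fresnelSchlick(1.0f, F0); //exact: F0 + (1-F0)*0
    return F.x == F0.x && F.y == F0.y && F.z == F0.z;
}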
#endif |
GB_binop__isne_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64)
// A*D function (colscale): GB (_AxD__isne_int64)
// D*A function (rowscale): GB (_DxB__isne_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64)
// C=scalar+B GB (_bind1st__isne_int64)
// C=scalar+B' GB (_bind1st_tran__isne_int64)
// C=A+scalar GB (_bind2nd__isne_int64)
// C=A'+scalar GB (_bind2nd_tran__isne_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
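// Hedged illustration (added; not one of the generated kernels): with the
// definition above, GB_BINOP (z, x, y, i, j) expands to  z = (x != y) ;
// ISNE stores the comparison result in the C type (int64_t, 0 or 1), not bool.
static inline int64_t GB_isne_int64_example (int64_t x, int64_t y)
{
    int64_t z ;
    GB_BINOP (z, x, y, 0, 0) ;      // e.g. (3,3) -> 0, (3,7) -> 1
    return (z) ;
}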
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gardenia.h | #pragma once
#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#define USE_OMP
#ifdef USE_TBB
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif
#ifdef USE_OMP
#include <omp.h>
#endif
namespace gardenia {
struct blocked_range {
typedef size_t const_iterator;
blocked_range(size_t begin, size_t end) : begin_(begin), end_(end) {}
blocked_range(int begin, int end) : begin_(begin), end_(end) {}
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
private:
size_t begin_;
size_t end_;
};
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
blocked_range r(begin, end);
f(r);
}
template <typename Func>
void parallel_for(size_t begin, size_t end,
const Func &f, size_t /*grainsize*/) {
assert(end >= begin);
#pragma omp parallel for
for (int i = static_cast<int>(begin); i < static_cast<int>(end); ++i)
f(blocked_range(i, i + 1));
}
template <typename T, typename U>
bool value_representation(U const &value) {
return static_cast<U>(static_cast<T>(value)) == value;
}
template <typename T, typename Func>
inline void for_(bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) {
static_assert(std::is_integral<T>::value, "end must be integral type");
parallelize = parallelize && value_representation<size_t>(end);
parallelize ? parallel_for(begin, end, f, grainsize)
: xparallel_for(begin, end, f);
}
template <typename Func, typename... Args>
void on_each(Func&& f, const Args&... args) {
#pragma omp parallel
{
f(omp_get_thread_num());
};
}
template <typename T, typename Func>
inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100u) {
#ifdef USE_SINGLE_THREAD
for (size_t i = 0; i < size; ++i) {
f(i);
}
#else // #ifdef USE_SINGLE_THREAD
for_(parallelize, 0u, size, [&](const blocked_range &r) {
#ifdef USE_OMP
#pragma omp parallel for
for (int i = static_cast<int>(r.begin());
i < static_cast<int>(r.end()); i++) {
f(i);
}
#else
for (size_t i = r.begin(); i < r.end(); i++) {
f(i);
}
#endif
}, grainsize);
#endif // #ifdef USE_SINGLE_THREAD
}
template <typename T, typename Func>
inline void for_i(T size, Func f, size_t grainsize = 100) {
for_i(true, size, f, grainsize);
}
}
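// Hedged usage sketch (added illustration; the caller below is hypothetical):
// for_i runs the body once per index in [0, size); with USE_OMP defined the
// indices are spread across OpenMP threads by the nested parallel for above.
inline void gardenia_for_i_example(std::vector<float> &y) {
  gardenia::for_i(y.size(), [&](size_t i) {
    y[i] = static_cast<float>(i) * 2.0f; // independent per-index work
  });
}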
|
kernel_wrapper.c | #include <stdio.h>
#include <string.h>
#include <iostream>
#include <omp.h>
#include <math.h>
#include "./../main.h"
void
kernel_wrapper( fp* image, // input image
int Nr, // IMAGE nbr of rows
int Nc, // IMAGE nbr of cols
long Ne, // IMAGE nbr of elem
int niter, // nbr of iterations
fp lambda, // update step size
long NeROI, // ROI nbr of elements
int* iN,
int* iS,
int* jE,
int* jW,
int iter) // primary loop
{
fp *dN = (fp*) malloc (sizeof(fp)*Ne);
fp *dS = (fp*) malloc (sizeof(fp)*Ne);
fp *dW = (fp*) malloc (sizeof(fp)*Ne);
fp *dE = (fp*) malloc (sizeof(fp)*Ne);
fp *c = (fp*) malloc (sizeof(fp)*Ne);
fp *sums = (fp*) malloc (sizeof(fp)*Ne);
fp *sums2 = (fp*) malloc (sizeof(fp)*Ne);
#pragma omp target data map(tofrom: image[0:Ne])\
map(to: iN[0:Nr], iS[0:Nr], jE[0:Nc], jW[0:Nc])\
map(alloc: dN[0:Ne], dS[0:Ne], dW[0:Ne], dE[0:Ne], \
c[0:Ne], sums[0:Ne], sums2[0:Ne])
{
//======================================================================================================================================================150
// KERNEL EXECUTION PARAMETERS
//======================================================================================================================================================150
// threads
size_t threads;
threads = NUMBER_THREADS;
// workgroups
int blocks_work_size;
int blocks_x = Ne/(int)threads;
if (Ne % (int)threads != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks_work_size = blocks_x;
printf("max # of workgroups = %d, # of threads/workgroup = %d (ensure that device can handle)\n",
(int)(blocks_work_size), (int)threads);
//======================================================================================================================================================150
// Extract Kernel - SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//======================================================================================================================================================150
#ifdef DEBUG
for (long i = 0; i < 16; i++)
printf("before extract: %f\n",image[i]);
printf("\n");
#endif
#pragma omp target teams distribute parallel for num_teams(blocks_work_size) thread_limit(NUMBER_THREADS)
for (int ei = 0; ei < Ne; ei++)
image[ei] = expf(image[ei]/(fp)255); // exponentiate input IMAGE and copy to output image
int blocks2_work_size;
long no;
int mul;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// Prepare kernel
#pragma omp target teams distribute parallel for num_teams(blocks_work_size) thread_limit(NUMBER_THREADS)
for (int ei = 0; ei < Ne; ei++) {
sums[ei] = image[ei];
sums2[ei] = image[ei]*image[ei];
}
// initial values
blocks2_work_size = blocks_work_size; // original number of blocks
no = Ne; // original number of sum elements
mul = 1; // original multiplier
// loop
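// Hedged note (added commentary): each pass of the while loop below folds `no`
// partial sums into blocks2_work_size per-block sums stored at stride
// mul*NUMBER_THREADS, so sums[0] and sums2[0] hold the full totals after
// roughly log base NUMBER_THREADS of Ne passes.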
while(blocks2_work_size != 0){
#ifdef DEBUG
printf("max # of workgroups = %d, # of threads/workgroup = %d (ensure that device can handle)\n",
blocks2_work_size, (int)threads);
#endif
#pragma omp target teams num_teams(blocks2_work_size) thread_limit(NUMBER_THREADS)
{
fp psum[NUMBER_THREADS];
fp psum2[NUMBER_THREADS];
#pragma omp parallel
{
int bx = omp_get_team_num();
int tx = omp_get_thread_num();
int ei = (bx*NUMBER_THREADS)+tx;// unique thread id, more threads than actual elements !!!
int nf = NUMBER_THREADS-(blocks2_work_size*NUMBER_THREADS-no);// number of elements assigned to last block
int df = 0;// divisibility factor for the last block
// counters
int i;
// copy data to shared memory
if(ei<no){// do only for the number of elements, omit extra threads
psum[tx] = sums[ei*mul];
psum2[tx] = sums2[ei*mul];
}
#pragma omp barrier
// reduction of sums if all blocks are full (rare case)
if(nf == NUMBER_THREADS){
// sum of every 2, 4, ..., NUMBER_THREADS elements
for(i=2; i<=NUMBER_THREADS; i=2*i){
// sum of elements
if((tx+1) % i == 0){ // every ith
psum[tx] = psum[tx] + psum[tx-i/2];
psum2[tx] = psum2[tx] + psum2[tx-i/2];
}
// synchronization
#pragma omp barrier
}
// final summation by last thread in every block
if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory
sums[bx*mul*NUMBER_THREADS] = psum[tx];
sums2[bx*mul*NUMBER_THREADS] = psum2[tx];
}
}
// reduction of sums if last block is not full (common case)
else{
// for full blocks (all except for last block)
if(bx != (blocks2_work_size - 1)){ //
// sum of every 2, 4, ..., NUMBER_THREADS elements
for(i=2; i<=NUMBER_THREADS; i=2*i){ //
// sum of elements
if((tx+1) % i == 0){ // every ith
psum[tx] = psum[tx] + psum[tx-i/2];
psum2[tx] = psum2[tx] + psum2[tx-i/2];
}
// synchronization
#pragma omp barrier
}
// final summation by last thread in every block
if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory
sums[bx*mul*NUMBER_THREADS] = psum[tx];
sums2[bx*mul*NUMBER_THREADS] = psum2[tx];
}
}
// for not full block (last block)
else{ //
// figure out divisibility
for(i=2; i<=NUMBER_THREADS; i=2*i){ //
if(nf >= i){
df = i;
}
}
// sum of every 2, 4, ..., NUMBER_THREADS elements
for(i=2; i<=df; i=2*i){ //
// sum of elements (only busy threads)
if((tx+1) % i == 0 && tx<df){ // every ith
psum[tx] = psum[tx] + psum[tx-i/2];
psum2[tx] = psum2[tx] + psum2[tx-i/2];
}
// synchronization (all threads)
#pragma omp barrier
}
// remainder / final summation by last thread
if(tx==(df-1)){ //
// compute the remainder and final summation by last busy thread
for(i=(bx*NUMBER_THREADS)+df; i<(bx*NUMBER_THREADS)+nf; i++){ //
psum[tx] = psum[tx] + sums[i];
psum2[tx] = psum2[tx] + sums2[i];
}
// final summation by last thread in every block
sums[bx*mul*NUMBER_THREADS] = psum[tx];
sums2[bx*mul*NUMBER_THREADS] = psum2[tx];
}
}
}
}
}
// update execution parameters
no = blocks2_work_size;
if(blocks2_work_size == 1){
blocks2_work_size = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2_work_size/(int)threads; // number of blocks
if (blocks2_work_size % (int)threads != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2_work_size = blocks_x;
}
} // while
#pragma omp target update from (sums[0:1])
#pragma omp target update from (sums2[0:1])
#ifdef DEBUG
printf("total: %f total2: %f\n", sums[0], sums2[0]);
#endif
//====================================================================================================100
// calculate statistics
//====================================================================================================100
meanROI = sums[0] / (fp)(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (sums2[0] / (fp)(NeROI)) - meanROI2; // gets variance of ROI
q0sqr = varROI / meanROI2; // gets normalized variance (squared coefficient of variation) of ROI, used as the noise estimate
//====================================================================================================100
// execute srad kernel
//====================================================================================================100
#pragma omp target teams distribute parallel for num_teams(blocks_work_size) thread_limit(NUMBER_THREADS)
for (int ei = 0; ei < Ne; ei++) {
// figure out row/col location in new matrix
int row = (ei+1) % Nr - 1; // (0-n) row
int col = (ei+1) / Nr + 1 - 1; // (0-n) column
if((ei+1) % Nr == 0){
row = Nr - 1;
col = col - 1;
}
// directional derivatives, ICOV, diffusion coefficient
fp d_Jc = image[ei]; // get value of the current element
// directional derivatives (every element of IMAGE)(try to copy to shared memory or temp files)
fp N_loc = image[iN[row] + Nr*col] - d_Jc; // north direction derivative
fp S_loc = image[iS[row] + Nr*col] - d_Jc; // south direction derivative
fp W_loc = image[row + Nr*jW[col]] - d_Jc; // west direction derivative
fp E_loc = image[row + Nr*jE[col]] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
fp d_G2 = (N_loc*N_loc + S_loc*S_loc + W_loc*W_loc + E_loc*E_loc) / (d_Jc*d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
fp d_L = (N_loc + S_loc + W_loc + E_loc) / d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
fp d_num = ((fp)0.5*d_G2) - (((fp)1.0/(fp)16.0)*(d_L*d_L)) ; // num (based on gradient and laplacian)
fp d_den = (fp)1 + ((fp)0.25*d_L); // den (based on laplacian)
fp d_qsqr = d_num/(d_den*d_den); // qsqr (based on num and den)
// diffusion coefficient (equ 33) (every element of IMAGE)
d_den = (d_qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; // den (based on qsqr and q0sqr)
fp d_c_loc = (fp)1.0 / ((fp)1.0+d_den) ; // diffusion coefficient (based on den)
// saturate diffusion coefficient to 0-1 range
if (d_c_loc < 0){ // if diffusion coefficient < 0
d_c_loc = 0; // ... set to 0
}
else if (d_c_loc > 1){ // if diffusion coefficient > 1
d_c_loc = 1; // ... set to 1
}
// save data to global memory
dN[ei] = N_loc;
dS[ei] = S_loc;
dW[ei] = W_loc;
dE[ei] = E_loc;
c[ei] = d_c_loc;
}
//====================================================================================================100
// execute srad2 kernel
//====================================================================================================100
#pragma omp target teams distribute parallel for num_teams(blocks_work_size ) thread_limit(NUMBER_THREADS)
for (int ei = 0; ei < Ne; ei++){ // make sure that only threads matching jobs run
// figure out row/col location in new matrix
int row = (ei+1) % Nr - 1; // (0-n) row
int col = (ei+1) / Nr ; // (0-n) column
if((ei+1) % Nr == 0){
row = Nr - 1;
col = col - 1;
}
// diffusion coefficient
fp d_cN = c[ei]; // north diffusion coefficient
fp d_cS = c[iS[row] + Nr*col]; // south diffusion coefficient
fp d_cW = c[ei]; // west diffusion coefficient
fp d_cE = c[row + Nr * jE[col]]; // east diffusion coefficient
// divergence (equ 58)
fp d_D = d_cN*dN[ei] + d_cS*dS[ei] + d_cW*dW[ei] + d_cE*dE[ei];
// image update (equ 61) (every element of IMAGE)
image[ei] += (fp)0.25*lambda*d_D; // updates image (based on input time step and divergence)
}
} // for
// print a newline after the display of iteration numbers
printf("\n");
//======================================================================================================================================================150
// Compress Kernel - SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//======================================================================================================================================================150
#pragma omp target teams distribute parallel for num_teams(blocks_work_size ) thread_limit(NUMBER_THREADS)
for (int ei = 0; ei < Ne; ei++)
image[ei] = logf(image[ei])*(fp)255; // take the log of the image and scale back up to the 0-255 range
}
//
#ifdef DEBUG
for (long i = 0; i < 16; i++)
printf("%f ", image[i]);
printf("\n");
#endif
free(dN);
free(dS);
free(dW);
free(dE);
free(c);
free(sums);
free(sums2);
}
|
simpleomp.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_SIMPLEOMP_H
#define NCNN_SIMPLEOMP_H
#include "platform.h"
#if NCNN_SIMPLEOMP
#include <stdint.h>
// This minimal openmp runtime implementation only supports the llvm openmp abi
// and only supports #pragma omp parallel for num_threads(X)
#ifdef __cplusplus
extern "C" {
#endif
NCNN_EXPORT int omp_get_max_threads();
NCNN_EXPORT void omp_set_num_threads(int num_threads);
NCNN_EXPORT int omp_get_dynamic();
NCNN_EXPORT void omp_set_dynamic(int dynamic);
NCNN_EXPORT int omp_get_num_threads();
NCNN_EXPORT int omp_get_thread_num();
NCNN_EXPORT int kmp_get_blocktime();
NCNN_EXPORT void kmp_set_blocktime(int blocktime);
#ifdef __cplusplus
}
#endif
#endif // NCNN_SIMPLEOMP
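// Hedged usage sketch (added illustration): the one construct this runtime
// supports, per the note above. The function, body, and thread count below
// are hypothetical.
#if 0 // illustration only; requires the ncnn simpleomp runtime when linking
static void simpleomp_example(float* data, int n)
{
    #pragma omp parallel for num_threads(4)
    for (int i = 0; i < n; i++)
        data[i] *= 2.f; // each iteration handled by one of 4 threads
}
#endif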
#endif // NCNN_SIMPLEOMP_H
|
GB_unaryop__identity_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_uint16
// op(A') function: GB_tran__identity_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
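// Hedged illustration (added; not one of the generated kernels): GB_CAST_OP
// above reads aij = Ax [pA] as uint16_t, casts it to int16_t, and stores the
// result in Cx [pC]; e.g. 65535u narrows to -1 under two's complement.
static inline void GB_identity_cast_example
(
    int16_t *Cx,
    const uint16_t *Ax,
    int64_t p
)
{
    GB_CAST_OP (p, p) ;
}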
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int16_uint16
(
int16_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
deconvolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
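// Hedged note (added commentary, not in the original): deconv3x3s1 below is
// the transpose of a stride-1 3x3 convolution: each input pixel is scattered
// into a 3x3 output neighborhood. The NEON path loads 4 input pixels at a time
// and issues three shifted load/accumulate/store sequences per kernel row.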
static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.row(i);
float* outptr0 = outptr;
float* outptr1 = outptr + outw;
float* outptr2 = outptr + outw * 2;
int j = 0;
#if __ARM_NEON
for (; j + 3 < w; j += 4)
{
float32x4_t _v = vld1q_f32(r0);
//
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
vst1q_f32(outptr0 + 0, _out00);
float32x4_t _out01 = vld1q_f32(outptr0 + 1);
_out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
vst1q_f32(outptr0 + 1, _out01);
float32x4_t _out02 = vld1q_f32(outptr0 + 2);
_out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
vst1q_f32(outptr0 + 2, _out02);
//
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
vst1q_f32(outptr1 + 0, _out10);
float32x4_t _out11 = vld1q_f32(outptr1 + 1);
_out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
vst1q_f32(outptr1 + 1, _out11);
float32x4_t _out12 = vld1q_f32(outptr1 + 2);
_out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
vst1q_f32(outptr1 + 2, _out12);
//
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
vst1q_f32(outptr2 + 0, _out20);
float32x4_t _out21 = vld1q_f32(outptr2 + 1);
_out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
vst1q_f32(outptr2 + 1, _out21);
float32x4_t _out22 = vld1q_f32(outptr2 + 2);
_out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
vst1q_f32(outptr2 + 2, _out22);
r0 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0++;
outptr1++;
outptr2++;
}
}
}
}
}
static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.row(i * 2);
float* outptr0 = outptr;
float* outptr1 = outptr0 + outw;
float* outptr2 = outptr1 + outw;
int j = 0;
#if __ARM_NEON
for (; j + 3 < w; j += 4)
{
float32x4_t _v = vld1q_f32(r0);
// out row 0
float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6
float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7
float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8
float32x4x2_t _out0 = vld2q_f32(outptr0);
_out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6
_out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7
vst2q_f32(outptr0, _out0);
_out0 = vld2q_f32(outptr0 + 2);
_out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8
vst2q_f32(outptr0 + 2, _out0);
// out row 1
float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6
float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7
float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8
float32x4x2_t _out1 = vld2q_f32(outptr1);
_out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6
_out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7
vst2q_f32(outptr1, _out1);
_out1 = vld2q_f32(outptr1 + 2);
_out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8
vst2q_f32(outptr1 + 2, _out1);
// out row 2
float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6
float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7
float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8
float32x4x2_t _out2 = vld2q_f32(outptr2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6
_out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7
vst2q_f32(outptr2, _out2);
_out2 = vld2q_f32(outptr2 + 2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8
vst2q_f32(outptr2 + 2, _out2);
r0 += 4;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
}
}
}
}
}
|
collision.c | /*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) Blender Foundation
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): none yet.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/collision.c
* \ingroup bke
*/
#include "MEM_guardedalloc.h"
#include "DNA_cloth_types.h"
#include "DNA_effect_types.h"
#include "DNA_group_types.h"
#include "DNA_object_types.h"
#include "DNA_object_force.h"
#include "DNA_scene_types.h"
#include "DNA_meshdata_types.h"
#include "BLI_utildefines.h"
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_edgehash.h"
#include "BKE_cloth.h"
#include "BKE_effect.h"
#include "BKE_modifier.h"
#include "BKE_scene.h"
#ifdef WITH_BULLET
#include "Bullet-C-Api.h"
#endif
#include "BLI_kdopbvh.h"
#include "BKE_collision.h"
#ifdef WITH_ELTOPO
#include "eltopo-capi.h"
#endif
/***********************************
Collision modifier code start
***********************************/
/* step is limited from 0 (frame start position) to 1 (frame end position) */
void collision_move_object(CollisionModifierData *collmd, float step, float prevstep)
{
float tv[3] = {0, 0, 0};
unsigned int i = 0;
/* the collider doesn't move this frame */
if (collmd->is_static) {
for (i = 0; i < collmd->mvert_num; i++) {
zero_v3(collmd->current_v[i].co);
}
return;
}
for (i = 0; i < collmd->mvert_num; i++) {
sub_v3_v3v3(tv, collmd->xnew[i].co, collmd->x[i].co);
VECADDS(collmd->current_x[i].co, collmd->x[i].co, tv, prevstep);
VECADDS(collmd->current_xnew[i].co, collmd->x[i].co, tv, step);
sub_v3_v3v3(collmd->current_v[i].co, collmd->current_xnew[i].co, collmd->current_x[i].co);
}
bvhtree_update_from_mvert(
collmd->bvhtree, collmd->current_x, collmd->current_xnew,
collmd->tri, collmd->tri_num, true);
}
BVHTree *bvhtree_build_from_mvert(
const MVert *mvert,
const struct MVertTri *tri, int tri_num,
float epsilon)
{
BVHTree *tree;
const MVertTri *vt;
int i;
tree = BLI_bvhtree_new(tri_num, epsilon, 4, 26);
/* fill tree */
for (i = 0, vt = tri; i < tri_num; i++, vt++) {
float co[3][3];
copy_v3_v3(co[0], mvert[vt->tri[0]].co);
copy_v3_v3(co[1], mvert[vt->tri[1]].co);
copy_v3_v3(co[2], mvert[vt->tri[2]].co);
BLI_bvhtree_insert(tree, i, co[0], 3);
}
/* balance tree */
BLI_bvhtree_balance(tree);
return tree;
}
void bvhtree_update_from_mvert(
BVHTree *bvhtree,
const MVert *mvert, const MVert *mvert_moving,
const MVertTri *tri, int tri_num,
bool moving)
{
const MVertTri *vt;
int i;
if ((bvhtree == NULL) || (mvert == NULL)) {
return;
}
if (mvert_moving == NULL) {
moving = false;
}
for (i = 0, vt = tri; i < tri_num; i++, vt++) {
float co[3][3];
bool ret;
copy_v3_v3(co[0], mvert[vt->tri[0]].co);
copy_v3_v3(co[1], mvert[vt->tri[1]].co);
copy_v3_v3(co[2], mvert[vt->tri[2]].co);
/* copy new locations into array */
if (moving) {
float co_moving[3][3];
/* update moving positions */
copy_v3_v3(co_moving[0], mvert_moving[vt->tri[0]].co);
copy_v3_v3(co_moving[1], mvert_moving[vt->tri[1]].co);
copy_v3_v3(co_moving[2], mvert_moving[vt->tri[2]].co);
ret = BLI_bvhtree_update_node(bvhtree, i, &co[0][0], &co_moving[0][0], 3);
}
else {
ret = BLI_bvhtree_update_node(bvhtree, i, &co[0][0], NULL, 3);
}
/* check if tree is already full */
if (ret == false) {
break;
}
}
BLI_bvhtree_update_tree(bvhtree);
}
/***********************************
Collision modifier code end
***********************************/
// w3 is not perfect
static void collision_compute_barycentric ( float pv[3], float p1[3], float p2[3], float p3[3], float *w1, float *w2, float *w3 )
{
/* dot_v3v3 */
#define INPR(v1, v2) ( (v1)[0] * (v2)[0] + (v1)[1] * (v2)[1] + (v1)[2] * (v2)[2])
double tempV1[3], tempV2[3], tempV4[3];
double a, b, c, d, e, f;
VECSUB ( tempV1, p1, p3 );
VECSUB ( tempV2, p2, p3 );
VECSUB ( tempV4, pv, p3 );
a = INPR ( tempV1, tempV1 );
b = INPR ( tempV1, tempV2 );
c = INPR ( tempV2, tempV2 );
e = INPR ( tempV1, tempV4 );
f = INPR ( tempV2, tempV4 );
d = ( a * c - b * b );
if ( ABS ( d ) < (double)ALMOST_ZERO ) {
*w1 = *w2 = *w3 = 1.0 / 3.0;
return;
}
w1[0] = ( float ) ( ( e * c - b * f ) / d );
if ( w1[0] < 0 )
w1[0] = 0;
w2[0] = ( float ) ( ( f - b * ( double ) w1[0] ) / c );
if ( w2[0] < 0 )
w2[0] = 0;
w3[0] = 1.0f - w1[0] - w2[0];
#undef INPR
}
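/* Hedged note (added commentary): the solver above is Cramer's rule applied to
 * the 2x2 normal equations of the triangle's edge basis,
 *   [a b; b c] [w1; w2] = [e; f],  d = a*c - b*b,
 * with w1 and w2 clamped to >= 0 afterwards, which is why the derived
 * w3 = 1 - w1 - w2 "is not perfect", as noted above. */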
#ifdef __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wdouble-promotion"
#endif
DO_INLINE void collision_interpolateOnTriangle ( float to[3], float v1[3], float v2[3], float v3[3], double w1, double w2, double w3 )
{
zero_v3(to);
VECADDMUL(to, v1, w1);
VECADDMUL(to, v2, w2);
VECADDMUL(to, v3, w3);
}
static int cloth_collision_response_static ( ClothModifierData *clmd, CollisionModifierData *collmd, CollPair *collpair, CollPair *collision_end )
{
int result = 0;
Cloth *cloth1;
float w1, w2, w3, u1, u2, u3;
float v1[3], v2[3], relativeVelocity[3];
float magrelVel;
float epsilon2 = BLI_bvhtree_get_epsilon ( collmd->bvhtree );
cloth1 = clmd->clothObject;
for ( ; collpair != collision_end; collpair++ ) {
float i1[3], i2[3], i3[3];
zero_v3(i1);
zero_v3(i2);
zero_v3(i3);
/* only handle static collisions here */
if ( collpair->flag & COLLISION_IN_FUTURE )
continue;
/* compute barycentric coordinates for both collision points */
collision_compute_barycentric ( collpair->pa,
cloth1->verts[collpair->ap1].txold,
cloth1->verts[collpair->ap2].txold,
cloth1->verts[collpair->ap3].txold,
&w1, &w2, &w3 );
/* was: txold */
collision_compute_barycentric ( collpair->pb,
collmd->current_x[collpair->bp1].co,
collmd->current_x[collpair->bp2].co,
collmd->current_x[collpair->bp3].co,
&u1, &u2, &u3 );
/* Calculate relative "velocity". */
collision_interpolateOnTriangle ( v1, cloth1->verts[collpair->ap1].tv, cloth1->verts[collpair->ap2].tv, cloth1->verts[collpair->ap3].tv, w1, w2, w3 );
collision_interpolateOnTriangle ( v2, collmd->current_v[collpair->bp1].co, collmd->current_v[collpair->bp2].co, collmd->current_v[collpair->bp3].co, u1, u2, u3 );
sub_v3_v3v3(relativeVelocity, v2, v1);
/* Calculate the normal component of the relative velocity (actually only the magnitude - the direction is stored in 'normal'). */
magrelVel = dot_v3v3(relativeVelocity, collpair->normal);
/* printf("magrelVel: %f\n", magrelVel); */
/* Calculate masses of points.
* TODO */
/* The original paper's test is v_n_mag < 0 for approaching points; with relativeVelocity = v2 - v1 here, approaching corresponds to magrelVel > 0. */
if ( magrelVel > ALMOST_ZERO ) {
/* Calculate Impulse magnitude to stop all motion in normal direction. */
float magtangent = 0, repulse = 0, d = 0;
double impulse = 0.0;
float vrel_t_pre[3];
float temp[3], spf;
/* calculate tangential velocity */
copy_v3_v3 ( temp, collpair->normal );
mul_v3_fl(temp, magrelVel);
sub_v3_v3v3(vrel_t_pre, relativeVelocity, temp);
/* Decrease in magnitude of relative tangential velocity due to coulomb friction
* in original formula "magrelVel" should be the "change of relative velocity in normal direction" */
magtangent = min_ff(clmd->coll_parms->friction * 0.01f * magrelVel, len_v3(vrel_t_pre));
/* Apply friction impulse. */
if ( magtangent > ALMOST_ZERO ) {
normalize_v3(vrel_t_pre);
impulse = magtangent / ( 1.0f + w1*w1 + w2*w2 + w3*w3 ); /* 2.0 * */
VECADDMUL ( i1, vrel_t_pre, w1 * impulse );
VECADDMUL ( i2, vrel_t_pre, w2 * impulse );
VECADDMUL ( i3, vrel_t_pre, w3 * impulse );
}
/* Apply velocity stopping impulse
* I_c = m * v_N / 2.0
* no 2.0 * magrelVel normally, but looks nicer DG */
impulse = magrelVel / ( 1.0 + w1*w1 + w2*w2 + w3*w3 );
VECADDMUL ( i1, collpair->normal, w1 * impulse );
cloth1->verts[collpair->ap1].impulse_count++;
VECADDMUL ( i2, collpair->normal, w2 * impulse );
cloth1->verts[collpair->ap2].impulse_count++;
VECADDMUL ( i3, collpair->normal, w3 * impulse );
cloth1->verts[collpair->ap3].impulse_count++;
/* Apply repulse impulse if distance too short
* I_r = -min(dt*kd, max(0, 1d/dt - v_n))
* DG: this formula needs to be changed for this code since we apply impulses/repulses like this:
* v += impulse; x_new = x + v;
* We don't use dt!!
* DG TODO: Fix usage of dt here! */
spf = (float)clmd->sim_parms->stepsPerFrame / clmd->sim_parms->timescale;
d = clmd->coll_parms->epsilon*8.0f/9.0f + epsilon2*8.0f/9.0f - collpair->distance;
if ( ( magrelVel < 0.1f*d*spf ) && ( d > ALMOST_ZERO ) ) {
repulse = MIN2 ( d*1.0f/spf, 0.1f*d*spf - magrelVel );
/* stay on the safe side and clamp repulse */
if ( impulse > ALMOST_ZERO )
repulse = min_ff( repulse, 5.0*impulse );
repulse = max_ff(impulse, repulse);
impulse = repulse / ( 1.0f + w1*w1 + w2*w2 + w3*w3 ); /* original 2.0 / 0.25 */
VECADDMUL ( i1, collpair->normal, impulse );
VECADDMUL ( i2, collpair->normal, impulse );
VECADDMUL ( i3, collpair->normal, impulse );
}
result = 1;
}
else {
/* Apply repulse impulse if distance too short
* I_r = -min(dt*kd, max(0, 1d/dt - v_n))
* DG: this formula needs to be changed for this code since we apply impulses/repulses like this:
* v += impulse; x_new = x + v;
* We don't use dt!! */
float spf = (float)clmd->sim_parms->stepsPerFrame / clmd->sim_parms->timescale;
float d = clmd->coll_parms->epsilon*8.0f/9.0f + epsilon2*8.0f/9.0f - (float)collpair->distance;
if ( d > ALMOST_ZERO) {
/* stay on the safe side and clamp repulse */
float repulse = d*1.0f/spf;
float impulse = repulse / ( 3.0f * ( 1.0f + w1*w1 + w2*w2 + w3*w3 )); /* original 2.0 / 0.25 */
VECADDMUL ( i1, collpair->normal, impulse );
VECADDMUL ( i2, collpair->normal, impulse );
VECADDMUL ( i3, collpair->normal, impulse );
cloth1->verts[collpair->ap1].impulse_count++;
cloth1->verts[collpair->ap2].impulse_count++;
cloth1->verts[collpair->ap3].impulse_count++;
result = 1;
}
}
if (result) {
int i = 0;
for (i = 0; i < 3; i++) {
if (cloth1->verts[collpair->ap1].impulse_count > 0 && ABS(cloth1->verts[collpair->ap1].impulse[i]) < ABS(i1[i]))
cloth1->verts[collpair->ap1].impulse[i] = i1[i];
if (cloth1->verts[collpair->ap2].impulse_count > 0 && ABS(cloth1->verts[collpair->ap2].impulse[i]) < ABS(i2[i]))
cloth1->verts[collpair->ap2].impulse[i] = i2[i];
if (cloth1->verts[collpair->ap3].impulse_count > 0 && ABS(cloth1->verts[collpair->ap3].impulse[i]) < ABS(i3[i]))
cloth1->verts[collpair->ap3].impulse[i] = i3[i];
}
}
}
return result;
}
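/* Note (annotation, not from the original sources): the recurring
 * 1/(1 + w1*w1 + w2*w2 + w3*w3) factor above is plausibly an effective
 * inverse-mass normalization: spreading an impulse over the three triangle
 * vertices with barycentric weights w_i changes the interpolated
 * contact-point velocity by (w1^2 + w2^2 + w3^2) times the impulse, with the
 * leading 1 accounting for the opposing side of the contact. */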
#ifdef __GNUC__
# pragma GCC diagnostic pop
#endif
// Determines collisions on overlap; collisions are written to collpair[i] and collpair + number_collisions_found is returned.
static CollPair* cloth_collision(ModifierData *md1, ModifierData *md2,
BVHTreeOverlap *overlap, CollPair *collpair, float UNUSED(dt))
{
ClothModifierData *clmd = (ClothModifierData *)md1;
CollisionModifierData *collmd = (CollisionModifierData *) md2;
/* Cloth *cloth = clmd->clothObject; */ /* UNUSED */
const MVertTri *tri_a, *tri_b;
#ifdef WITH_BULLET
ClothVertex *verts1 = clmd->clothObject->verts;
#endif
double distance = 0;
float epsilon1 = clmd->coll_parms->epsilon;
float epsilon2 = BLI_bvhtree_get_epsilon ( collmd->bvhtree );
tri_a = &clmd->clothObject->tri[overlap->indexA];
tri_b = &collmd->tri[overlap->indexB];
/* fill face_a */
collpair->ap1 = tri_a->tri[0];
collpair->ap2 = tri_a->tri[1];
collpair->ap3 = tri_a->tri[2];
/* fill face_b */
collpair->bp1 = tri_b->tri[0];
collpair->bp2 = tri_b->tri[1];
collpair->bp3 = tri_b->tri[2];
{
#ifdef WITH_BULLET
// calc distance + normal
distance = plNearestPoints (
verts1[collpair->ap1].txold, verts1[collpair->ap2].txold, verts1[collpair->ap3].txold, collmd->current_x[collpair->bp1].co, collmd->current_x[collpair->bp2].co, collmd->current_x[collpair->bp3].co, collpair->pa, collpair->pb, collpair->vector );
#else
// just be sure that we don't add anything
distance = 2.0 * (double)( epsilon1 + epsilon2 + ALMOST_ZERO );
#endif
// distance -1 means no collision result
if (distance != -1.0 && (distance <= (double)(epsilon1 + epsilon2 + ALMOST_ZERO))) {
normalize_v3_v3(collpair->normal, collpair->vector);
collpair->distance = distance;
collpair->flag = 0;
collpair++;
}/*
else {
float w1, w2, w3, u1, u2, u3;
float v1[3], v2[3], relativeVelocity[3];
// calc relative velocity
// compute barycentric coordinates for both collision points
collision_compute_barycentric ( collpair->pa,
verts1[collpair->ap1].txold,
verts1[collpair->ap2].txold,
verts1[collpair->ap3].txold,
&w1, &w2, &w3 );
// was: txold
collision_compute_barycentric ( collpair->pb,
collmd->current_x[collpair->bp1].co,
collmd->current_x[collpair->bp2].co,
collmd->current_x[collpair->bp3].co,
&u1, &u2, &u3 );
// Calculate relative "velocity".
collision_interpolateOnTriangle ( v1, verts1[collpair->ap1].tv, verts1[collpair->ap2].tv, verts1[collpair->ap3].tv, w1, w2, w3 );
collision_interpolateOnTriangle ( v2, collmd->current_v[collpair->bp1].co, collmd->current_v[collpair->bp2].co, collmd->current_v[collpair->bp3].co, u1, u2, u3 );
sub_v3_v3v3(relativeVelocity, v2, v1);
if (sqrt(dot_v3v3(relativeVelocity, relativeVelocity)) >= distance)
{
// check for collision in the future
collpair->flag |= COLLISION_IN_FUTURE;
collpair++;
}
}*/
}
return collpair;
}
static void add_collision_object(Object ***objs, unsigned int *numobj, unsigned int *maxobj, Object *ob, Object *self, int level, unsigned int modifier_type)
{
CollisionModifierData *cmd= NULL;
if (ob == self)
return;
/* only get objects with collision modifier */
if (((modifier_type == eModifierType_Collision) && ob->pd && ob->pd->deflect) || (modifier_type != eModifierType_Collision))
cmd= (CollisionModifierData *)modifiers_findByType(ob, modifier_type);
if (cmd) {
/* extend array */
if (*numobj >= *maxobj) {
*maxobj *= 2;
*objs= MEM_reallocN(*objs, sizeof(Object *)*(*maxobj));
}
(*objs)[*numobj] = ob;
(*numobj)++;
}
/* objects in dupli groups, one level only for now */
if (ob->dup_group && level == 0) {
GroupObject *go;
Group *group= ob->dup_group;
/* add objects */
for (go= group->gobject.first; go; go= go->next)
add_collision_object(objs, numobj, maxobj, go->ob, self, level+1, modifier_type);
}
}
// return all collision objects in scene
// collision object will exclude self
Object **get_collisionobjects_ext(Scene *scene, Object *self, Group *group, int layer, unsigned int *numcollobj, unsigned int modifier_type, bool dupli)
{
Base *base;
Object **objs;
GroupObject *go;
unsigned int numobj= 0, maxobj= 100;
int level = dupli ? 0 : 1;
objs= MEM_callocN(sizeof(Object *)*maxobj, "CollisionObjectsArray");
/* gather all collision objects */
if (group) {
/* use specified group */
for (go= group->gobject.first; go; go= go->next)
add_collision_object(&objs, &numobj, &maxobj, go->ob, self, level, modifier_type);
}
else {
Scene *sce_iter;
/* add objects in same layer in scene */
for (SETLOOPER(scene, sce_iter, base)) {
if ( base->lay & layer )
add_collision_object(&objs, &numobj, &maxobj, base->object, self, level, modifier_type);
}
}
*numcollobj= numobj;
return objs;
}
Object **get_collisionobjects(Scene *scene, Object *self, Group *group, unsigned int *numcollobj, unsigned int modifier_type)
{
/* Need to check for active layers, too.
Otherwise this check fails if the objects are not on the same layer - DG */
return get_collisionobjects_ext(scene, self, group, self->lay | scene->lay, numcollobj, modifier_type, true);
}
static void add_collider_cache_object(ListBase **objs, Object *ob, Object *self, int level)
{
CollisionModifierData *cmd= NULL;
ColliderCache *col;
if (ob == self)
return;
if (ob->pd && ob->pd->deflect)
cmd =(CollisionModifierData *)modifiers_findByType(ob, eModifierType_Collision);
if (cmd && cmd->bvhtree) {
if (*objs == NULL)
*objs = MEM_callocN(sizeof(ListBase), "ColliderCache array");
col = MEM_callocN(sizeof(ColliderCache), "ColliderCache");
col->ob = ob;
col->collmd = cmd;
/* make sure collider is properly set up */
collision_move_object(cmd, 1.0, 0.0);
BLI_addtail(*objs, col);
}
/* objects in dupli groups, one level only for now */
if (ob->dup_group && level == 0) {
GroupObject *go;
Group *group= ob->dup_group;
/* add objects */
for (go= group->gobject.first; go; go= go->next)
add_collider_cache_object(objs, go->ob, self, level+1);
}
}
ListBase *get_collider_cache(Scene *scene, Object *self, Group *group)
{
GroupObject *go;
ListBase *objs= NULL;
/* add object in same layer in scene */
if (group) {
for (go= group->gobject.first; go; go= go->next)
add_collider_cache_object(&objs, go->ob, self, 0);
}
else {
Scene *sce_iter;
Base *base;
/* add objects in same layer in scene */
for (SETLOOPER(scene, sce_iter, base)) {
if (!self || (base->lay & self->lay))
add_collider_cache_object(&objs, base->object, self, 0);
}
}
return objs;
}
void free_collider_cache(ListBase **colliders)
{
if (*colliders) {
BLI_freelistN(*colliders);
MEM_freeN(*colliders);
*colliders = NULL;
}
}
static void cloth_bvh_objcollisions_nearcheck ( ClothModifierData * clmd, CollisionModifierData *collmd,
CollPair **collisions, CollPair **collisions_index, int numresult, BVHTreeOverlap *overlap, double dt)
{
int i;
*collisions = (CollPair *) MEM_mallocN(sizeof(CollPair) * numresult * 4, "collision array" ); // * 4 since cloth_collision_static can return more than 1 collision
*collisions_index = *collisions;
for ( i = 0; i < numresult; i++ ) {
*collisions_index = cloth_collision((ModifierData *)clmd, (ModifierData *)collmd,
overlap+i, *collisions_index, dt);
}
}
static int cloth_bvh_objcollisions_resolve ( ClothModifierData * clmd, CollisionModifierData *collmd, CollPair *collisions, CollPair *collisions_index)
{
Cloth *cloth = clmd->clothObject;
int i=0, j = 0, /*numfaces = 0, */ mvert_num = 0;
ClothVertex *verts = NULL;
int ret = 0;
int result = 0;
mvert_num = clmd->clothObject->mvert_num;
verts = cloth->verts;
// process all collisions (calculate impulses, TODO: also repulses if distance too short)
result = 1;
for ( j = 0; j < 2; j++ ) { /* the fixed iteration count is just a value that ensures convergence */
result = 0;
if ( collmd->bvhtree ) {
result += cloth_collision_response_static ( clmd, collmd, collisions, collisions_index );
// apply impulses in parallel
if (result) {
for (i = 0; i < mvert_num; i++) {
// calculate "velocities" (just xnew = xold + v; no dt in v)
if (verts[i].impulse_count) {
// VECADDMUL ( verts[i].tv, verts[i].impulse, 1.0f / verts[i].impulse_count );
VECADD ( verts[i].tv, verts[i].tv, verts[i].impulse);
zero_v3(verts[i].impulse);
verts[i].impulse_count = 0;
ret++;
}
}
}
}
if (!result) {
break;
}
}
return ret;
}
// cloth - object collisions
int cloth_bvh_objcollision(Object *ob, ClothModifierData *clmd, float step, float dt )
{
Cloth *cloth= clmd->clothObject;
BVHTree *cloth_bvh= cloth->bvhtree;
unsigned int i=0, /* numfaces = 0, */ /* UNUSED */ mvert_num = 0, k, l, j;
int rounds = 0; // result counts applied collisions
ClothVertex *verts = NULL;
int ret = 0, ret2 = 0;
Object **collobjs = NULL;
unsigned int numcollobj = 0;
if ((clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_COLLOBJ) || cloth_bvh==NULL)
return 0;
verts = cloth->verts;
/* numfaces = cloth->numfaces; */ /* UNUSED */
mvert_num = cloth->mvert_num;
////////////////////////////////////////////////////////////
// static collisions
////////////////////////////////////////////////////////////
// update cloth bvh
bvhtree_update_from_cloth ( clmd, 1 ); // 0 means STATIC, 1 means MOVING (see later in this function)
bvhselftree_update_from_cloth ( clmd, 0 ); // 0 means STATIC, 1 means MOVING (see later in this function)
collobjs = get_collisionobjects(clmd->scene, ob, clmd->coll_parms->group, &numcollobj, eModifierType_Collision);
if (!collobjs)
return 0;
/* move object to position (step) in time */
for (i = 0; i < numcollobj; i++) {
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
if (!collmd->bvhtree)
continue;
/* move object to position (step) in time */
collision_move_object ( collmd, step + dt, step );
}
do {
CollPair **collisions, **collisions_index;
ret2 = 0;
collisions = MEM_callocN(sizeof(CollPair *) *numcollobj, "CollPair");
collisions_index = MEM_callocN(sizeof(CollPair *) *numcollobj, "CollPair");
// check all collision objects
for (i = 0; i < numcollobj; i++) {
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
BVHTreeOverlap *overlap = NULL;
unsigned int result = 0;
if (!collmd->bvhtree)
continue;
/* search for overlapping collision pairs */
overlap = BLI_bvhtree_overlap(cloth_bvh, collmd->bvhtree, &result, NULL, NULL);
// go to next object if no overlap is there
if ( result && overlap ) {
/* check if collisions really happen (costly near check) */
cloth_bvh_objcollisions_nearcheck ( clmd, collmd, &collisions[i],
&collisions_index[i], result, overlap, dt/(float)clmd->coll_parms->loop_count);
// resolve nearby collisions
ret += cloth_bvh_objcollisions_resolve ( clmd, collmd, collisions[i], collisions_index[i]);
ret2 += ret;
}
if ( overlap )
MEM_freeN ( overlap );
}
rounds++;
for (i = 0; i < numcollobj; i++) {
if ( collisions[i] ) MEM_freeN ( collisions[i] );
}
MEM_freeN(collisions);
MEM_freeN(collisions_index);
////////////////////////////////////////////////////////////
// update positions
// this is needed for bvh_calc_DOP_hull_moving() [kdop.c]
////////////////////////////////////////////////////////////
/* verts come from clmd */
for (i = 0; i < mvert_num; i++) {
if ( clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL ) {
if ( verts [i].flags & CLOTH_VERT_FLAG_PINNED ) {
continue;
}
}
VECADD ( verts[i].tx, verts[i].txold, verts[i].tv );
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Test on *simple* selfcollisions
////////////////////////////////////////////////////////////
if ( clmd->coll_parms->flags & CLOTH_COLLSETTINGS_FLAG_SELF ) {
for (l = 0; l < (unsigned int)clmd->coll_parms->self_loop_count; l++) {
/* TODO: add coll quality rounds again */
BVHTreeOverlap *overlap = NULL;
unsigned int result = 0;
// collisions = 1;
verts = cloth->verts; // needed for openMP
/* numfaces = cloth->numfaces; */ /* UNUSED */
mvert_num = cloth->mvert_num;
if ( cloth->bvhselftree ) {
// search for overlapping collision pairs
overlap = BLI_bvhtree_overlap(cloth->bvhselftree, cloth->bvhselftree, &result, NULL, NULL);
// #pragma omp parallel for private(k, i, j) schedule(static)
for ( k = 0; k < result; k++ ) {
float temp[3];
float length = 0;
float mindistance;
i = overlap[k].indexA;
j = overlap[k].indexB;
mindistance = clmd->coll_parms->selfepsilon* ( cloth->verts[i].avg_spring_len + cloth->verts[j].avg_spring_len );
if ( clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL ) {
if ( ( cloth->verts [i].flags & CLOTH_VERT_FLAG_PINNED ) &&
( cloth->verts [j].flags & CLOTH_VERT_FLAG_PINNED ) )
{
continue;
}
}
if ((cloth->verts[i].flags & CLOTH_VERT_FLAG_NOSELFCOLL) ||
(cloth->verts[j].flags & CLOTH_VERT_FLAG_NOSELFCOLL))
{
continue;
}
sub_v3_v3v3(temp, verts[i].tx, verts[j].tx);
if ( ( ABS ( temp[0] ) > mindistance ) || ( ABS ( temp[1] ) > mindistance ) || ( ABS ( temp[2] ) > mindistance ) ) continue;
if (BLI_edgeset_haskey(cloth->edgeset, i, j)) {
continue;
}
length = normalize_v3(temp );
if ( length < mindistance ) {
float correction = mindistance - length;
if ( cloth->verts [i].flags & CLOTH_VERT_FLAG_PINNED ) {
mul_v3_fl(temp, -correction);
VECADD ( verts[j].tx, verts[j].tx, temp );
}
else if ( cloth->verts [j].flags & CLOTH_VERT_FLAG_PINNED ) {
mul_v3_fl(temp, correction);
VECADD ( verts[i].tx, verts[i].tx, temp );
}
else {
mul_v3_fl(temp, correction * -0.5f);
VECADD ( verts[j].tx, verts[j].tx, temp );
sub_v3_v3v3(verts[i].tx, verts[i].tx, temp);
}
ret = 1;
ret2 += ret;
}
else {
// check for approximated time collisions
}
}
if ( overlap )
MEM_freeN ( overlap );
}
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// SELFCOLLISIONS: update velocities
////////////////////////////////////////////////////////////
if (ret2) {
for (i = 0; i < cloth->mvert_num; i++) {
if ( ! ( verts [i].flags & CLOTH_VERT_FLAG_PINNED ) ) {
sub_v3_v3v3(verts[i].tv, verts[i].tx, verts[i].txold);
}
}
}
////////////////////////////////////////////////////////////
}
}
while ( ret2 && ( clmd->coll_parms->loop_count>rounds ) );
if (collobjs)
MEM_freeN(collobjs);
return 1|MIN2 ( ret, 1 );
}
BLI_INLINE void max_v3_v3v3(float r[3], const float a[3], const float b[3])
{
r[0] = max_ff(a[0], b[0]);
r[1] = max_ff(a[1], b[1]);
r[2] = max_ff(a[2], b[2]);
}
void collision_get_collider_velocity(float vel_old[3], float vel_new[3], CollisionModifierData *collmd, CollPair *collpair)
{
float u1, u2, u3;
/* compute barycentric coordinates */
collision_compute_barycentric(collpair->pb,
collmd->current_x[collpair->bp1].co,
collmd->current_x[collpair->bp2].co,
collmd->current_x[collpair->bp3].co,
&u1, &u2, &u3);
collision_interpolateOnTriangle(vel_new, collmd->current_v[collpair->bp1].co, collmd->current_v[collpair->bp2].co, collmd->current_v[collpair->bp3].co, u1, u2, u3);
/* XXX assume constant velocity of the collider for now */
copy_v3_v3(vel_old, vel_new);
}
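/* Illustration (annotation, not from the original sources): the barycentric
 * weights returned by collision_compute_barycentric() sum to 1, so the
 * interpolation above reconstructs the velocity of the contact point itself:
 *
 *   collision_compute_barycentric(pb, x1, x2, x3, &u1, &u2, &u3);
 *   collision_interpolateOnTriangle(v, v1, v2, v3, u1, u2, u3);
 *   // v = u1*v1 + u2*v2 + u3*v3, the collider velocity at pb
 */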
static bool cloth_points_collision_response_static(ClothModifierData *clmd, CollisionModifierData *collmd, PartDeflect *pd,
CollPair *collpair, CollPair *collision_end, float dt)
{
bool result = false;
float restitution = (1.0f - clmd->coll_parms->damping) * (1.0f - pd->pdef_sbdamp);
float inv_dt = 1.0f / dt;
Cloth *cloth1 = clmd->clothObject;
// float w1, w2;
float u1, u2, u3;
float v1[3], v2_old[3], v2_new[3], v_rel_old[3], v_rel_new[3];
float epsilon2 = BLI_bvhtree_get_epsilon ( collmd->bvhtree );
for ( ; collpair != collision_end; collpair++ ) {
float margin_distance = (float)(collpair->distance - (double)epsilon2);
float impulse[3];
float mag_v_rel;
if (margin_distance > 0.0f)
continue;
zero_v3(impulse);
/* only handle static collisions here */
if ( collpair->flag & COLLISION_IN_FUTURE )
continue;
/* compute barycentric coordinates for both collision points */
// w1 = 1.0f - collpair->time;
// w2 = collpair->time;
/* was: txold */
collision_compute_barycentric ( collpair->pb,
collmd->current_x[collpair->bp1].co,
collmd->current_x[collpair->bp2].co,
collmd->current_x[collpair->bp3].co,
&u1, &u2, &u3 );
/* Calculate relative velocity */
copy_v3_v3(v1, cloth1->verts[collpair->ap1].tv);
collision_interpolateOnTriangle ( v2_new, collmd->current_v[collpair->bp1].co, collmd->current_v[collpair->bp2].co, collmd->current_v[collpair->bp3].co, u1, u2, u3 );
/* XXX assume constant velocity of the collider for now */
copy_v3_v3(v2_old, v2_new);
sub_v3_v3v3(v_rel_old, v1, v2_old);
sub_v3_v3v3(v_rel_new, v1, v2_new);
/* normal component of the relative velocity */
mag_v_rel = dot_v3v3(v_rel_old, collpair->normal);
/**** DEBUG ****/
BKE_sim_debug_data_add_dot(collpair->pa, 0.9, 0.2, 0.2, "collision", 833, collpair->face1, collpair->face2);
BKE_sim_debug_data_add_dot(collpair->pb, 0.2, 0.9, 0.2, "collision", 834, collpair->face1, collpair->face2);
BKE_sim_debug_data_add_line(collpair->pa, collpair->pb, 0.8, 0.8, 0.8, "collision", 835, collpair->face1, collpair->face2);
/********/
if (mag_v_rel < -ALMOST_ZERO) {
float v_nor_old, v_nor_new;
float v_tan_old[3], v_tan_new[3];
float bounce, repulse;
/* Collision response based on
* "Simulating Complex Hair with Robust Collision Handling" (Choe, Choi, Ko, ACM SIGGRAPH 2005)
* http://graphics.snu.ac.kr/publications/2005-choe-HairSim/Choe_2005_SCA.pdf
*/
v_nor_old = mag_v_rel;
v_nor_new = dot_v3v3(v_rel_new, collpair->normal);
madd_v3_v3v3fl(v_tan_old, v_rel_old, collpair->normal, -v_nor_old);
madd_v3_v3v3fl(v_tan_new, v_rel_new, collpair->normal, -v_nor_new);
repulse = -margin_distance * inv_dt + dot_v3v3(v1, collpair->normal);
if (margin_distance < -epsilon2) {
bounce = -v_nor_new + v_nor_old * restitution;
mul_v3_v3fl(impulse, collpair->normal, max_ff(repulse, bounce));
}
else {
bounce = 0.0f;
mul_v3_v3fl(impulse, collpair->normal, repulse);
}
cloth1->verts[collpair->ap1].impulse_count++;
result = true;
}
if (result) {
int i = 0;
for (i = 0; i < 3; i++) {
if (cloth1->verts[collpair->ap1].impulse_count > 0 && fabsf(cloth1->verts[collpair->ap1].impulse[i]) < fabsf(impulse[i]))
cloth1->verts[collpair->ap1].impulse[i] = impulse[i];
}
}
}
return result;
}
BLI_INLINE bool cloth_point_face_collision_params(const float p1[3], const float p2[3], const float v0[3], const float v1[3], const float v2[3],
float r_nor[3], float *r_lambda, float r_w[3])
{
float edge1[3], edge2[3], p2face[3], p1p2[3], v0p2[3];
float nor_v0p2, nor_p1p2;
sub_v3_v3v3(edge1, v1, v0);
sub_v3_v3v3(edge2, v2, v0);
cross_v3_v3v3(r_nor, edge1, edge2);
normalize_v3(r_nor);
/* find the point on the face plane closest to p2 (v0p2 must be computed before its dot product) */
sub_v3_v3v3(v0p2, p2, v0);
nor_v0p2 = dot_v3v3(v0p2, r_nor);
madd_v3_v3v3fl(p2face, p2, r_nor, -nor_v0p2);
interp_weights_tri_v3(r_w, v0, v1, v2, p2face);
sub_v3_v3v3(p1p2, p2, p1);
nor_p1p2 = dot_v3v3(p1p2, r_nor);
*r_lambda = (nor_p1p2 != 0.0f ? nor_v0p2 / nor_p1p2 : 0.0f);
return r_w[1] >= 0.0f && r_w[2] >= 0.0f && r_w[1] + r_w[2] <= 1.0f;
#if 0 /* XXX this method uses the intersection point, but is broken and doesn't work well in general */
float p[3], vec1[3], line[3], edge1[3], edge2[3], q[3];
float a, f, u, v;
sub_v3_v3v3(edge1, v1, v0);
sub_v3_v3v3(edge2, v2, v0);
sub_v3_v3v3(line, p2, p1);
cross_v3_v3v3(p, line, edge2);
a = dot_v3v3(edge1, p);
if (a == 0.0f) return 0;
f = 1.0f / a;
sub_v3_v3v3(vec1, p1, v0);
u = f * dot_v3v3(vec1, p);
if ((u < 0.0f) || (u > 1.0f))
return false;
cross_v3_v3v3(q, vec1, edge1);
v = f * dot_v3v3(line, q);
if ((v < 0.0f) || ((u + v) > 1.0f))
return false;
*r_lambda = f * dot_v3v3(edge2, q);
/* don't care about 0..1 lambda range here */
/*if ((*r_lambda < 0.0f) || (*r_lambda > 1.0f))
* return 0;
*/
r_w[0] = 1.0f - u - v;
r_w[1] = u;
r_w[2] = v;
r_w[3] = 0.0f;
cross_v3_v3v3(r_nor, edge1, edge2);
normalize_v3(r_nor);
return true;
#endif
}
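/* Annotation (not from the original sources): r_w receives the barycentric
 * weights of p2 projected onto the face plane, and the return value checks
 * that this projection lies inside the triangle; r_lambda is the ratio of
 * signed plane distances dot(p2 - v0, n) / dot(p2 - p1, n), i.e. the
 * parameter at which the p1-p2 segment crosses the face plane. */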
static CollPair *cloth_point_collpair(
float p1[3], float p2[3], const MVert *mverts, int bp1, int bp2, int bp3,
int index_cloth, int index_coll, float epsilon, CollPair *collpair)
{
const float *co1 = mverts[bp1].co, *co2 = mverts[bp2].co, *co3 = mverts[bp3].co;
float lambda /*, distance1 */, distance2;
float facenor[3], v1p1[3], v1p2[3];
float w[3];
if (!cloth_point_face_collision_params(p1, p2, co1, co2, co3, facenor, &lambda, w))
return collpair;
sub_v3_v3v3(v1p1, p1, co1);
// distance1 = dot_v3v3(v1p1, facenor);
sub_v3_v3v3(v1p2, p2, co1);
distance2 = dot_v3v3(v1p2, facenor);
// if (distance2 > epsilon || (distance1 < 0.0f && distance2 < 0.0f))
if (distance2 > epsilon)
return collpair;
collpair->face1 = index_cloth; /* XXX actually not a face, but equivalent index for point */
collpair->face2 = index_coll;
collpair->ap1 = index_cloth;
collpair->ap2 = collpair->ap3 = -1; /* unused */
collpair->bp1 = bp1;
collpair->bp2 = bp2;
collpair->bp3 = bp3;
/* note: using the second point here, which is
* the current updated position that needs to be corrected
*/
copy_v3_v3(collpair->pa, p2);
collpair->distance = distance2;
mul_v3_v3fl(collpair->vector, facenor, -distance2);
interp_v3_v3v3v3(collpair->pb, co1, co2, co3, w);
copy_v3_v3(collpair->normal, facenor);
collpair->time = lambda;
collpair->flag = 0;
collpair++;
return collpair;
}
// Determines collisions on overlap; collisions are written to collpair[i] and collpair + number_collisions_found is returned.
static CollPair *cloth_point_collision(
ModifierData *md1, ModifierData *md2,
BVHTreeOverlap *overlap, float epsilon, CollPair *collpair, float UNUSED(dt))
{
ClothModifierData *clmd = (ClothModifierData *)md1;
CollisionModifierData *collmd = (CollisionModifierData *) md2;
/* Cloth *cloth = clmd->clothObject; */ /* UNUSED */
ClothVertex *vert = NULL;
const MVertTri *vt;
const MVert *mverts = collmd->current_x;
vert = &clmd->clothObject->verts[overlap->indexA];
vt = &collmd->tri[overlap->indexB];
collpair = cloth_point_collpair(
vert->tx, vert->x, mverts,
vt->tri[0], vt->tri[1], vt->tri[2],
overlap->indexA, overlap->indexB,
epsilon, collpair);
return collpair;
}
static void cloth_points_objcollisions_nearcheck(ClothModifierData * clmd, CollisionModifierData *collmd,
CollPair **collisions, CollPair **collisions_index,
int numresult, BVHTreeOverlap *overlap, float epsilon, double dt)
{
int i;
/* can return 2 collisions in total */
*collisions = (CollPair *) MEM_mallocN(sizeof(CollPair) * numresult * 2, "collision array" );
*collisions_index = *collisions;
for ( i = 0; i < numresult; i++ ) {
*collisions_index = cloth_point_collision((ModifierData *)clmd, (ModifierData *)collmd,
overlap+i, epsilon, *collisions_index, dt);
}
}
static int cloth_points_objcollisions_resolve(ClothModifierData * clmd, CollisionModifierData *collmd, PartDeflect *pd,
CollPair *collisions, CollPair *collisions_index, float dt)
{
Cloth *cloth = clmd->clothObject;
int i = 0, mvert_num = clmd->clothObject->mvert_num;
ClothVertex *verts = cloth->verts;
int ret = 0;
// process all collisions
if ( collmd->bvhtree ) {
bool result = cloth_points_collision_response_static(clmd, collmd, pd, collisions, collisions_index, dt);
// apply impulses in parallel
if (result) {
for (i = 0; i < mvert_num; i++) {
// calculate "velocities" (just xnew = xold + v; no dt in v)
if (verts[i].impulse_count) {
// VECADDMUL ( verts[i].tv, verts[i].impulse, 1.0f / verts[i].impulse_count );
VECADD ( verts[i].tv, verts[i].tv, verts[i].impulse);
zero_v3(verts[i].impulse);
verts[i].impulse_count = 0;
ret++;
}
}
}
}
return ret;
}
// cloth - object collisions
int cloth_points_objcollision(Object *ob, ClothModifierData *clmd, float step, float dt)
{
Cloth *cloth= clmd->clothObject;
BVHTree *cloth_bvh;
int rounds = 0; // result counts applied collisions
float round_dt = dt / (float)clmd->coll_parms->loop_count;
unsigned int i = 0, mvert_num = 0;
ClothVertex *verts = NULL;
int ret = 0, ret2 = 0;
Object **collobjs = NULL;
unsigned int numcollobj = 0;
verts = cloth->verts;
mvert_num = cloth->mvert_num;
////////////////////////////////////////////////////////////
// static collisions
////////////////////////////////////////////////////////////
// create temporary cloth points bvh
cloth_bvh = BLI_bvhtree_new(mvert_num, max_ff(clmd->coll_parms->epsilon, clmd->coll_parms->distance_repel), 4, 6);
/* fill tree */
for (i = 0; i < mvert_num; i++) {
float co[2][3];
copy_v3_v3(co[0], verts[i].x);
copy_v3_v3(co[1], verts[i].tx);
BLI_bvhtree_insert(cloth_bvh, i, co[0], 2);
}
/* balance tree */
BLI_bvhtree_balance(cloth_bvh);
collobjs = get_collisionobjects(clmd->scene, ob, clmd->coll_parms->group, &numcollobj, eModifierType_Collision);
if (!collobjs)
return 0;
/* move object to position (step) in time */
for (i = 0; i < numcollobj; i++) {
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
if (!collmd->bvhtree)
continue;
/* move object to position (step) in time */
collision_move_object ( collmd, step + dt, step );
}
do {
CollPair **collisions, **collisions_index;
ret2 = 0;
collisions = MEM_callocN(sizeof(CollPair *) *numcollobj, "CollPair");
collisions_index = MEM_callocN(sizeof(CollPair *) *numcollobj, "CollPair");
// check all collision objects
for (i = 0; i < numcollobj; i++) {
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
BVHTreeOverlap *overlap = NULL;
unsigned int result = 0;
float epsilon;
if (!collmd->bvhtree)
continue;
/* search for overlapping collision pairs */
overlap = BLI_bvhtree_overlap(cloth_bvh, collmd->bvhtree, &result, NULL, NULL);
epsilon = BLI_bvhtree_get_epsilon(collmd->bvhtree);
// go to next object if no overlap is there
if (result && overlap) {
/* check if collisions really happen (costly near check) */
cloth_points_objcollisions_nearcheck(clmd, collmd, &collisions[i], &collisions_index[i],
result, overlap, epsilon, round_dt);
// resolve nearby collisions
ret += cloth_points_objcollisions_resolve(clmd, collmd, collob->pd, collisions[i], collisions_index[i], round_dt);
ret2 += ret;
}
if (overlap)
MEM_freeN ( overlap );
}
rounds++;
for (i = 0; i < numcollobj; i++) {
if (collisions[i])
MEM_freeN(collisions[i]);
}
MEM_freeN(collisions);
MEM_freeN(collisions_index);
////////////////////////////////////////////////////////////
// update positions
// this is needed for bvh_calc_DOP_hull_moving() [kdop.c]
////////////////////////////////////////////////////////////
// verts come from clmd
for (i = 0; i < mvert_num; i++) {
if ( clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL ) {
if ( verts [i].flags & CLOTH_VERT_FLAG_PINNED ) {
continue;
}
}
VECADD ( verts[i].tx, verts[i].txold, verts[i].tv );
}
////////////////////////////////////////////////////////////
}
while ( ret2 && ( clmd->coll_parms->loop_count>rounds ) );
if (collobjs)
MEM_freeN(collobjs);
BLI_bvhtree_free(cloth_bvh);
return 1|MIN2 ( ret, 1 );
}
void cloth_find_point_contacts(Object *ob, ClothModifierData *clmd, float step, float dt,
ColliderContacts **r_collider_contacts, int *r_totcolliders)
{
Cloth *cloth= clmd->clothObject;
BVHTree *cloth_bvh;
unsigned int i = 0, mvert_num = 0;
ClothVertex *verts = NULL;
ColliderContacts *collider_contacts;
Object **collobjs = NULL;
unsigned int numcollobj = 0;
verts = cloth->verts;
mvert_num = cloth->mvert_num;
////////////////////////////////////////////////////////////
// static collisions
////////////////////////////////////////////////////////////
// create temporary cloth points bvh
cloth_bvh = BLI_bvhtree_new(mvert_num, max_ff(clmd->coll_parms->epsilon, clmd->coll_parms->distance_repel), 4, 6);
/* fill tree */
for (i = 0; i < mvert_num; i++) {
float co[6];
copy_v3_v3(&co[0*3], verts[i].x);
copy_v3_v3(&co[1*3], verts[i].tx);
BLI_bvhtree_insert(cloth_bvh, i, co, 2);
}
/* balance tree */
BLI_bvhtree_balance(cloth_bvh);
collobjs = get_collisionobjects(clmd->scene, ob, clmd->coll_parms->group, &numcollobj, eModifierType_Collision);
if (!collobjs) {
*r_collider_contacts = NULL;
*r_totcolliders = 0;
return;
}
/* move object to position (step) in time */
for (i = 0; i < numcollobj; i++) {
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
if (!collmd->bvhtree)
continue;
/* move object to position (step) in time */
collision_move_object ( collmd, step + dt, step );
}
collider_contacts = MEM_callocN(sizeof(ColliderContacts) * numcollobj, "CollPair");
// check all collision objects
for (i = 0; i < numcollobj; i++) {
ColliderContacts *ct = collider_contacts + i;
Object *collob= collobjs[i];
CollisionModifierData *collmd = (CollisionModifierData *)modifiers_findByType(collob, eModifierType_Collision);
BVHTreeOverlap *overlap;
unsigned int result = 0;
float epsilon;
ct->ob = collob;
ct->collmd = collmd;
ct->collisions = NULL;
ct->totcollisions = 0;
if (!collmd->bvhtree)
continue;
/* search for overlapping collision pairs */
overlap = BLI_bvhtree_overlap(cloth_bvh, collmd->bvhtree, &result, NULL, NULL);
epsilon = BLI_bvhtree_get_epsilon(collmd->bvhtree);
// go to next object if no overlap is there
if (result && overlap) {
CollPair *collisions_index;
/* check if collisions really happen (costly near check) */
cloth_points_objcollisions_nearcheck(clmd, collmd, &ct->collisions, &collisions_index,
result, overlap, epsilon, dt);
ct->totcollisions = (int)(collisions_index - ct->collisions);
// resolve nearby collisions
// ret += cloth_points_objcollisions_resolve(clmd, collmd, collob->pd, collisions[i], collisions_index[i], dt);
}
if (overlap)
MEM_freeN(overlap);
}
if (collobjs)
MEM_freeN(collobjs);
BLI_bvhtree_free(cloth_bvh);
////////////////////////////////////////////////////////////
// update positions
// this is needed for bvh_calc_DOP_hull_moving() [kdop.c]
////////////////////////////////////////////////////////////
// verts come from clmd
for (i = 0; i < mvert_num; i++) {
if (clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL) {
if (verts [i].flags & CLOTH_VERT_FLAG_PINNED) {
continue;
}
}
VECADD(verts[i].tx, verts[i].txold, verts[i].tv);
}
////////////////////////////////////////////////////////////
*r_collider_contacts = collider_contacts;
*r_totcolliders = numcollobj;
}
void cloth_free_contacts(ColliderContacts *collider_contacts, int totcolliders)
{
if (collider_contacts) {
int i;
for (i = 0; i < totcolliders; ++i) {
ColliderContacts *ct = collider_contacts + i;
if (ct->collisions) {
MEM_freeN(ct->collisions);
}
}
MEM_freeN(collider_contacts);
}
}
|
plot.c | #include "cplot/plot.h"
#ifdef CPLOT_USE_PARALLEL_LOOPS
#include <omp.h>
#endif
void cplot_meta_range_set_d(cplot_meta_t meta, double xa, double xb, double ya, double yb)
{
arf_set_d(&meta->xa, xa); arf_set_d(&meta->xb, xb);
arf_set_d(&meta->ya, ya); arf_set_d(&meta->yb, yb);
}
void cplot_meta_init(cplot_meta_t meta)
{
meta->maxprec = 128;
arf_init(&meta->xa); arf_init(&meta->xb);
arf_init(&meta->ya); arf_init(&meta->yb);
cplot_meta_range_set_d(meta, -5, 5, -5, 5);
}
void cplot_meta_clear(cplot_meta_t meta)
{
arf_clear(&meta->xa); arf_clear(&meta->xb);
arf_clear(&meta->ya); arf_clear(&meta->yb);
}
void cplot_domain_plot(cplot_img_t res, cplot_func_t func, cplot_color_func_t color_func, cplot_meta_t meta)
{
slong sx,sy;
sx = cplot_img_get_x(res); sy = cplot_img_get_y(res);
#ifdef CPLOT_USE_PARALLEL_LOOPS
#pragma omp parallel
#endif
{
acb_t z, w;
acb_init(z); acb_init(w);
#ifdef CPLOT_USE_PARALLEL_LOOPS
#pragma omp for collapse(2)
#endif
for (slong y = 0; y < sy; y++)
{
for (slong x = 0; x < sx; x++)
{
for (slong prec = 10; prec < meta->maxprec; prec *= 2)
{
arf_sub(arb_midref(acb_imagref(z)), &meta->yb, &meta->ya, prec, ARF_RND_DOWN);
arf_mul_ui(arb_midref(acb_imagref(z)), arb_midref(acb_imagref(z)), y, prec, ARF_RND_DOWN);
arf_div_ui(arb_midref(acb_imagref(z)), arb_midref(acb_imagref(z)), sy-1, prec, ARF_RND_DOWN);
arf_add(arb_midref(acb_imagref(z)), arb_midref(acb_imagref(z)), &meta->ya, prec, ARF_RND_DOWN);
arf_sub(arb_midref(acb_realref(z)), &meta->xb, &meta->xa, prec, ARF_RND_DOWN);
arf_mul_ui(arb_midref(acb_realref(z)), arb_midref(acb_realref(z)), x, prec, ARF_RND_DOWN);
arf_div_ui(arb_midref(acb_realref(z)), arb_midref(acb_realref(z)),sx-1, prec, ARF_RND_DOWN);
arf_add(arb_midref(acb_realref(z)), arb_midref(acb_realref(z)), &meta->xa, prec, ARF_RND_DOWN);
func(w, z, prec);
if (acb_rel_accuracy_bits(w) > 4)
break;
}
color_func(cplot_img_get_rgb(res,x,y), w, 32);
}
}
acb_clear(z);
acb_clear(w);
flint_cleanup();
}
}
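/* A minimal sketch (hypothetical, not part of the cplot API) of a callback
 * matching the cplot_func_t signature inferred from the func(w, z, prec)
 * call above, using Arb's acb interface:
 *
 *   static void square_func(acb_t w, const acb_t z, slong prec)
 *   {
 *       acb_mul(w, z, z, prec);  // w = z^2 at the requested precision
 *   }
 */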
|
task_late_fulfill.c | // RUN: %libarcher-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \
// RUN: %libarcher-run-race | FileCheck %s
// As of gcc 9.2, gcc still does not support the detach clause on the task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
int main() {
#pragma omp parallel
#pragma omp master
{
omp_event_handle_t event;
int a = 0, b = 0;
omp_event_handle_t *f_event;
#pragma omp task detach(event) depend(out : f_event) shared(f_event)
{
printf("%i: task 1\n", omp_get_thread_num());
f_event = &event;
}
usleep(10000);
#pragma omp task depend(in : f_event) shared(f_event, a, b)
{
printf("%i: task 2, %p, %i, %i\n", omp_get_thread_num(), f_event, a, b);
f_event = &event;
}
usleep(10000);
a++;
printf("%i: calling omp_fulfill_event\n", omp_get_thread_num());
omp_fulfill_event(event);
//#pragma omp task if (0) depend(in : f_event)
// {}
b++;
usleep(10000);
#pragma omp taskwait
}
return 0;
}
// no race for a++ in line 32:
// CHECK-NOT: #0 {{.*}}task_late_fulfill.c:35
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:31
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:40
|
clauses-5.c | void
foo (int *p)
{
int i, j = 0;
#pragma omp parallel if (2, 1) /* { dg-error "expected" } */
;
#pragma omp parallel num_threads (3, 4) /* { dg-error "expected" } */
;
#pragma omp teams num_teams (4, 5) /* { dg-error "expected" } */
;
#pragma omp teams thread_limit (6, 7) /* { dg-error "expected" } */
;
#pragma omp for linear (j : 8, 9) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
j += (8, 9);
#pragma omp for schedule (static, 3, 4) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp for collapse (1, 1) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp for ordered (1, 1) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp simd safelen (3, 4) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp simd simdlen (4, 8) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp simd aligned (p: 4, 8) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp teams
#pragma omp distribute dist_schedule (static, 6, 7) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp task final (8, 1) /* { dg-error "expected" } */
;
#pragma omp task priority (2, 3) /* { dg-error "expected" } */
;
#pragma omp taskloop grainsize (4, 5) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp taskloop num_tasks (5, 6) /* { dg-error "expected" } */
for (i = 0; i < 30; i++)
;
#pragma omp target device (5, 1) /* { dg-error "expected" } */
;
#pragma omp critical (baz) hint (2, 3) /* { dg-error "expected" } */
;
}
|
MatriplexSym.h | #ifndef MatriplexSym_H
#define MatriplexSym_H
#include "MatriplexCommon.h"
#include "Matriplex.h"
//==============================================================================
// MatriplexSym
//==============================================================================
namespace Matriplex
{
const idx_t gSymOffsets[7][36] =
{
{}, {},
{ 0, 1, 1, 2 },
{ 0, 1, 3, 1, 2, 4, 3, 4, 5 }, // 3
{}, {},
{ 0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20 }
};
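// Example: for D = 3, element (i, j) lives at lower-triangle slot
// gSymOffsets[3][i * 3 + j]; e.g. (i, j) = (1, 2) gives offset 4, so
// matriplex n stores it at fArray[4 * N + n] (see ConstAt()/At() below).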
//------------------------------------------------------------------------------
template<typename T, idx_t D, idx_t N>
class MatriplexSym
{
public:
typedef T value_type;
enum
{
/// no. of matrix rows
kRows = D,
/// no. of matrix columns
kCols = D,
/// no of elements: lower triangle
kSize = (D + 1) * D / 2,
/// size of the whole matriplex
kTotSize = N * kSize
};
T fArray[kTotSize] __attribute__((aligned(64)));
MatriplexSym() {}
MatriplexSym(T v) { SetVal(v); }
idx_t PlexSize() const { return N; }
void SetVal(T v)
{
for (idx_t i = 0; i < kTotSize; ++i)
{
fArray[i] = v;
}
}
void Add(const MatriplexSym &v)
{
for (idx_t i = 0; i < kTotSize; ++i)
{
fArray[i] += v.fArray[i];
}
}
void Scale(T scale)
{
for (idx_t i = 0; i < kTotSize; ++i)
{
fArray[i] *= scale;
}
}
T operator[](idx_t xx) const { return fArray[xx]; }
T& operator[](idx_t xx) { return fArray[xx]; }
const idx_t * Offsets() const { return gSymOffsets[D]; }
idx_t Off(idx_t i) const { return gSymOffsets[D][i]; }
const T& ConstAt(idx_t n, idx_t i, idx_t j) const { return fArray[Off(i * D + j) * N + n]; }
T& At(idx_t n, idx_t i, idx_t j) { return fArray[Off(i * D + j) * N + n]; }
T& operator()(idx_t n, idx_t i, idx_t j) { return At(n, i, j); }
const T& operator()(idx_t n, idx_t i, idx_t j) const { return ConstAt(n, i, j); }
MatriplexSym& operator=(const MatriplexSym& m)
{
memcpy(fArray, m.fArray, sizeof(T) * kTotSize);
return *this;
}
void CopySlot(idx_t n, const MatriplexSym& m)
{
for (idx_t i = n; i < kTotSize; i += N)
{
fArray[i] = m.fArray[i];
}
}
void CopyIn(idx_t n, const T *arr)
{
for (idx_t i = n; i < kTotSize; i += N)
{
fArray[i] = *(arr++);
}
}
void CopyIn(idx_t n, const MatriplexSym& m, idx_t in)
{
for (idx_t i = n; i < kTotSize; i += N, in += N)
{
fArray[i] = m[in];
}
}
void Copy(idx_t n, idx_t in)
{
for (idx_t i = n; i < kTotSize; i += N, in += N)
{
fArray[i] = fArray[in];
}
}
#if defined(AVX512_INTRINSICS)
template<typename U>
void SlurpIn(const T *arr, __m512i& vi, const U&, const int N_proc = N)
{
//_mm512_prefetch_i32gather_ps(vi, arr, 1, _MM_HINT_T0);
const __m512 src = { 0 };
const __mmask16 k = N_proc == N ? -1 : (1 << N_proc) - 1;
for (int i = 0; i < kSize; ++i, ++arr)
{
//_mm512_prefetch_i32gather_ps(vi, arr+2, 1, _MM_HINT_NTA);
__m512 reg = _mm512_mask_i32gather_ps(src, k, vi, arr, sizeof(U));
_mm512_mask_store_ps(&fArray[i*N], k, reg);
}
}
/*
// Experimental methods, SlurpIn() seems to be at least as fast.
// See comments in mkFit/MkFitter.cc MkFitter::AddBestHit().
void ChewIn(const char *arr, int off, int vi[N], const char *tmp, __m512i& ui)
{
// This is a hack ... we know sizeof(Hit) = 64 = cache line = vector width.
for (int i = 0; i < N; ++i)
{
__m512 reg = _mm512_load_ps(arr + vi[i]);
_mm512_store_ps((void*) (tmp + 64*i), reg);
}
for (int i = 0; i < kSize; ++i)
{
__m512 reg = _mm512_i32gather_ps(ui, tmp + off + i*sizeof(T), 1);
_mm512_store_ps(&fArray[i*N], reg);
}
}
void Contaginate(const char *arr, int vi[N], const char *tmp)
{
// This is a hack ... we know sizeof(Hit) = 64 = cache line = vector width.
for (int i = 0; i < N; ++i)
{
__m512 reg = _mm512_load_ps(arr + vi[i]);
_mm512_store_ps((void*) (tmp + 64*i), reg);
}
}
void Plexify(const char *tmp, __m512i& ui)
{
for (int i = 0; i < kSize; ++i)
{
__m512 reg = _mm512_i32gather_ps(ui, tmp + i*sizeof(T), 1);
_mm512_store_ps(&fArray[i*N], reg);
}
}
*/
#elif defined(AVX2_INTRINSICS)
template<typename U>
void SlurpIn(const T *arr, __m256i& vi, const U&, const int N_proc = N)
{
const __m256 src = { 0 };
__m256i k = _mm256_setr_epi32( 0, 1, 2, 3, 4, 5, 6, 7 );
__m256i k_sel = _mm256_set1_epi32(N_proc);
__m256i k_master = _mm256_cmpgt_epi32(k_sel, k);
k = k_master;
for (int i = 0; i < kSize; ++i, ++arr)
{
__m256 reg = _mm256_mask_i32gather_ps(src, arr, vi, (__m256) k, sizeof(U));
// Restore mask (docs say gather clears it but it doesn't seem to).
k = k_master;
_mm256_maskstore_ps(&fArray[i*N], k, reg);
}
}
#else
void SlurpIn(const T *arr, int vi[N], const int N_proc = N)
{
// Separate N_proc == N case (gains about 7% in fit test).
if (N_proc == N)
{
for (int i = 0; i < kSize; ++i)
{
for (int j = 0; j < N; ++j)
{
fArray[i*N + j] = * (arr + i + vi[j]);
}
}
}
else
{
for (int i = 0; i < kSize; ++i)
{
for (int j = 0; j < N_proc; ++j)
{
fArray[i*N + j] = * (arr + i + vi[j]);
}
}
}
}
#endif
void CopyOut(idx_t n, T *arr) const
{
for (idx_t i = n; i < kTotSize; i += N)
{
*(arr++) = fArray[i];
}
}
void SetDiagonal3x3(idx_t n, T d)
{
T *p = fArray + n;
p[0*N] = d;
p[1*N] = 0;
p[2*N] = d;
p[3*N] = 0;
p[4*N] = 0;
p[5*N] = d;
}
MatriplexSym& Subtract(const MatriplexSym& a, const MatriplexSym& b)
{
// Does *this = a - b;
#pragma omp simd
for (idx_t i = 0; i < kTotSize; ++i)
{
fArray[i] = a.fArray[i] - b.fArray[i];
}
return *this;
}
// ==================================================================
// Super crazy shit for Kalman fit that should probably go elsewhere
// ==================================================================
void AddNoiseIntoUpperLeft3x3(T noise)
{
// XXXXX Review, canonicalize
// XXX icc complains: loop was not vectorized: cannot vectorize empty simd loop
T *p = fArray; ASSUME_ALIGNED(p, 64);
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
p[0*N+n] += noise;
p[2*N+n] += noise;
p[5*N+n] += noise;
}
}
void InvertUpperLeft3x3()
{
typedef T TT;
T *a = fArray; ASSUME_ALIGNED(a, 64);
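// Cofactor/adjugate inverse of the upper-left 3x3 block; same math as
// CramerInverterSym<T, 3, N>::Invert() further down.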
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
const TT c00 = a[2*N+n] * a[5*N+n] - a[4*N+n] * a[4*N+n];
const TT c01 = a[4*N+n] * a[3*N+n] - a[1*N+n] * a[5*N+n];
const TT c02 = a[1*N+n] * a[4*N+n] - a[2*N+n] * a[3*N+n];
const TT c11 = a[5*N+n] * a[0*N+n] - a[3*N+n] * a[3*N+n];
const TT c12 = a[3*N+n] * a[1*N+n] - a[4*N+n] * a[0*N+n];
const TT c22 = a[0*N+n] * a[2*N+n] - a[1*N+n] * a[1*N+n];
const TT det = a[0*N+n] * c00 + a[1*N+n] * c01 + a[3*N+n] * c02;
const TT s = TT(1) / det;
a[0*N+n] = s*c00;
a[1*N+n] = s*c01;
a[2*N+n] = s*c11;
a[3*N+n] = s*c02;
a[4*N+n] = s*c12;
a[5*N+n] = s*c22;
}
}
};
template<typename T, idx_t D, idx_t N> using MPlexSym = MatriplexSym<T, D, N>;
//==============================================================================
// Multiplications
//==============================================================================
template<typename T, idx_t D, idx_t N>
struct SymMultiplyCls
{
static void Multiply(const MPlexSym<T, D, N>& A,
const MPlexSym<T, D, N>& B,
MPlex<T, D, D, N>& C)
{
throw std::runtime_error("general symmetric multiplication not supported");
}
};
template<typename T, idx_t N>
struct SymMultiplyCls<T, 3, N>
{
static void Multiply(const MPlexSym<T, 3, N>& A,
const MPlexSym<T, 3, N>& B,
MPlex<T, 3, 3, N>& C)
{
const T *a = A.fArray; ASSUME_ALIGNED(a, 64);
const T *b = B.fArray; ASSUME_ALIGNED(b, 64);
T *c = C.fArray; ASSUME_ALIGNED(c, 64);
#ifdef MPLEX_INTRINSICS
for (idx_t n = 0; n < N; n += 64 / sizeof(T))
{
#include "intr_sym_3x3.ah"
}
#else
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
#include "std_sym_3x3.ah"
}
#endif
}
};
template<typename T, idx_t N>
struct SymMultiplyCls<T, 6, N>
{
static void Multiply(const MPlexSym<T, 6, N>& A,
const MPlexSym<T, 6, N>& B,
MPlex<T, 6, 6, N>& C)
{
const T *a = A.fArray; ASSUME_ALIGNED(a, 64);
const T *b = B.fArray; ASSUME_ALIGNED(b, 64);
T *c = C.fArray; ASSUME_ALIGNED(c, 64);
#ifdef MPLEX_INTRINSICS
for (idx_t n = 0; n < N; n += 64 / sizeof(T))
{
#include "intr_sym_6x6.ah"
}
#else
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
#include "std_sym_6x6.ah"
}
#endif
}
};
template<typename T, idx_t D, idx_t N>
void Multiply(const MPlexSym<T, D, N>& A,
const MPlexSym<T, D, N>& B,
MPlex<T, D, D, N>& C)
{
SymMultiplyCls<T, D, N>::Multiply(A, B, C);
}
//==============================================================================
// Cramer inversion
//==============================================================================
template<typename T, idx_t D, idx_t N>
struct CramerInverterSym
{
static void Invert(MPlexSym<T, D, N>& A)
{
throw std::runtime_error("general cramer inversion not supported");
}
};
template<typename T, idx_t N>
struct CramerInverterSym<T, 2, N>
{
static void Invert(MPlexSym<T, 2, N>& A)
{
typedef T TT;
T *a = A.fArray; ASSUME_ALIGNED(a, 64);
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
//const TT det = a[0*N+n] * a[2*N+n] - a[1*N+n] * a[1*N+n];
const double det = (double)a[0*N+n] * a[2*N+n] - (double)a[1*N+n] * a[1*N+n];
const TT s = TT(1) / det;
const TT tmp = s * a[2*N+n];
a[1*N+n] *= -s;
a[2*N+n] = s * a[0*N+n];
a[0*N+n] = tmp;
}
}
};
template<typename T, idx_t N>
struct CramerInverterSym<T, 3, N>
{
static void Invert(MPlexSym<T, 3, N>& A)
{
typedef T TT;
T *a = A.fArray; ASSUME_ALIGNED(a, 64);
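// Symmetric 3x3 inverse via the adjugate: with the packed lower-triangle
// layout (0: a00, 1: a10, 2: a11, 3: a20, 4: a21, 5: a22), c00..c22 are the
// cofactors, det expands along the first row, and the result is written
// back in the same packed layout.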
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
const TT c00 = a[2*N+n] * a[5*N+n] - a[4*N+n] * a[4*N+n];
const TT c01 = a[4*N+n] * a[3*N+n] - a[1*N+n] * a[5*N+n];
const TT c02 = a[1*N+n] * a[4*N+n] - a[2*N+n] * a[3*N+n];
const TT c11 = a[5*N+n] * a[0*N+n] - a[3*N+n] * a[3*N+n];
const TT c12 = a[3*N+n] * a[1*N+n] - a[4*N+n] * a[0*N+n];
const TT c22 = a[0*N+n] * a[2*N+n] - a[1*N+n] * a[1*N+n];
const TT det = a[0*N+n] * c00 + a[1*N+n] * c01 + a[3*N+n] * c02;
const TT s = TT(1) / det;
a[0*N+n] = s*c00;
a[1*N+n] = s*c01;
a[2*N+n] = s*c11;
a[3*N+n] = s*c02;
a[4*N+n] = s*c12;
a[5*N+n] = s*c22;
}
}
};
template<typename T, idx_t D, idx_t N>
void InvertCramerSym(MPlexSym<T, D, N>& A)
{
CramerInverterSym<T, D, N>::Invert(A);
}
//==============================================================================
// Cholesky inversion
//==============================================================================
template<typename T, idx_t D, idx_t N>
struct CholeskyInverterSym
{
static void Invert(MPlexSym<T, D, N>& A)
{
throw std::runtime_error("general cholesky inversion not supported");
}
};
template<typename T, idx_t N>
struct CholeskyInverterSym<T, 3, N>
{
static void Invert(MPlexSym<T, 3, N>& A)
{
typedef T TT;
T *a = A.fArray;
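// In-place inverse via Cholesky: the first block computes the reciprocal
// diagonal entries (l0, l2, l5) and the off-diagonal entries (l1, l3, l4)
// of the factor L, the block after "decomposition done" overwrites them
// with the entries of L^-1, and the final writes assemble
// A^-1 = L^-T * L^-1 in the packed layout.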
#pragma omp simd
for (idx_t n = 0; n < N; ++n)
{
TT l0 = std::sqrt(T(1) / a[0*N+n]);
TT l1 = a[1*N+n] * l0;
TT l2 = a[2*N+n] - l1 * l1;
l2 = std::sqrt(T(1) / l2);
TT l3 = a[3*N+n] * l0;
TT l4 = (a[4*N+n] - l1 * l3) * l2;
TT l5 = a[5*N+n] - (l3 * l3 + l4 * l4);
l5 = std::sqrt(T(1) / l5);
// decomposition done
l3 = (l1 * l4 * l2 - l3) * l0 * l5;
l1 = -l1 * l0 * l2;
l4 = -l4 * l2 * l5;
a[0*N+n] = l3*l3 + l1*l1 + l0*l0;
a[1*N+n] = l3*l4 + l1*l2;
a[2*N+n] = l4*l4 + l2*l2;
a[3*N+n] = l3*l5;
a[4*N+n] = l4*l5;
a[5*N+n] = l5*l5;
// m(2,x) are all zero if anything went wrong at l5.
// all zero, if anything went wrong already for l0 or l2.
}
}
};
template<typename T, idx_t D, idx_t N>
void InvertCholeskySym(MPlexSym<T, D, N>& A)
{
CholeskyInverterSym<T, D, N>::Invert(A);
}
//==============================================================================
// End Attic, close namespace Matriplex
//==============================================================================
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so clients must make sure all calls to get() with the same
/// location happen while the function_ref is still alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
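// Usage sketch (Tok, OtherLoc, and D are hypothetical): the parser records
// the expected type at the token that starts an initializer, and a later
// get() only returns it when queried with that same start location.
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterVariableInit(Tok.getLocation(), D);
//   QualType Expected = PreferredType.get(Tok.getLocation()); // the type
//   QualType Miss = PreferredType.get(OtherLoc);              // null type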
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate the constants
/// here because we cannot directly use the LLVM constants from clang code.
/// The value is verified against LLVM in lib/CodeGen/CGDecl.cpp.
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
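// Worked value (sketch): the exponent form keeps the clang constant easy to
// compare against llvm::Value.
//
//   static_assert(MaximumAlignment == 536870912u, // 1u << 29
//                 "greatest alignment for load/store/alloca and globals");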
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
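// Illustrative mapping from pragma spellings to stack actions (sketch,
// following the comments on the enumerators above):
//
//   #pragma pack()         // PSK_Reset
//   #pragma pack(8)        // PSK_Set
//   #pragma pack(push, 4)  // PSK_Push_Set
//   #pragma pack(pop)      // PSK_Pop
//   #pragma pack(show)     // PSK_Show ("pack" only)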
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
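// Usage sketch (Loc1/Loc2 are hypothetical SourceLocations): a push/pop pair
// through Act() mirrors how #pragma pack(push, 8) / #pragma pack(pop) nest.
//
//   PragmaStack<unsigned> Pack(/*Default=*/0);
//   Pack.Act(Loc1, PSK_Push_Set, /*StackSlotLabel=*/"", 8u); // push + set 8
//   bool Changed = Pack.hasValue();  // true: CurrentValue != DefaultValue
//   Pack.Act(Loc2, PSK_Pop, "", 0u); // restores the previous value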
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, together with the locations of
/// delete-expressions for which we could not prove whether they mismatch the
/// new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
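// Usage sketch (assumes sema::DelayedDiagnosticPool is constructible from a
// parent-pool pointer; S is a hypothetical Sema): collect diagnostics into a
// caller-owned pool while parsing, then pop without emitting and decide
// later what to do with the pool's contents.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; access/deprecation diagnostics land in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);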
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
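// Usage sketch (S and FD are hypothetical): temporarily enter a function's
// declaration context; everything is restored when the RAII object dies.
//
//   {
//     Sema::ContextRAII SavedContext(S, FD); // FD: a FunctionDecl*
//     // ... build declarations/expressions as if inside FD ...
//   } // CurContext, this-type override, and scope indices restored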
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
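// Usage sketch (S, MD, and UseLoc are hypothetical): synthesize the body of
// a special member; the destructor pops all pushed state in reverse order.
//
//   Sema::SynthesizedFunctionScope Scope(S, MD); // MD: a CXXMethodDecl*
//   Scope.addContextNote(UseLoc); // notes point at the triggering use
//   // ... build and attach the synthesized body ...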
/// WeakUndeclaredIdentifiers - Identifiers contained in a \#pragma weak
/// before being declared. Rare. May alias another identifier,
/// declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operands of MS-style inline assembly.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
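// Illustrative classification of common expressions (sketch, grounded in the
// enumerator documentation above):
//
//   sizeof(*p)             // Unevaluated: *p is never executed
//   static_assert(N > 0)   // ConstantEvaluated: checked at compile time
//   f(x + y)               // PotentiallyEvaluated: may run at run time
//   void g(int a = h());   // PotentiallyEvaluatedIfUsed: h() is odr-used
//                          // only if the default argument is actually used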
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are neither discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
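// Usage sketch (MD is a hypothetical CXXMethodDecl*): the pointer half of
// the pair carries the chosen method, the integer half the resolution kind;
// a deleted method maps to NoMemberOrDeleted.
//
//   SpecialMemberOverloadResult R(MD);
//   if (R.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Chosen = R.getMethod(); // same pointer as MD here
//   }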
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
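// Usage sketch (diag::err_example is a placeholder ID, not a real
// diagnostic): the returned SemaDiagnosticBuilder emits on destruction and,
// inside a template instantiation, also prints the instantiation stack.
//
//   S.Diag(Loc, diag::err_example) << SomeDecl->getDeclName() << SomeRange;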
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
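// Usage sketch (DiagID, Name, Loc, and T are hypothetical): bind the extra
// diagnostic arguments up front; diagnose() streams them in order and then
// appends the offending type.
//
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(DiagID, Name);
//   Diagnoser.diagnose(S, Loc, T); // emits DiagID with Name, then T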
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, so when we see a dereference we do not yet know whether
/// its address will eventually be taken. For example, in `&*p` where `p` is
/// a noderef pointer, we first parse the `*p` but still need to check that
/// `address of` is called on it later. This requires keeping a container of
/// all pending expressions and checking whether the address of them is
/// eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
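// Editor's sketch: the variadic overloads above bind extra %-arguments ahead
// of the type itself, so with a hypothetical diagnostic that takes the
// declaration as %0 (the type is appended as %1) a caller can write:
//
//   if (S.RequireCompleteType(Loc, VD->getType(),
//                             diag::err_var_incomplete_type, VD))
//     return true; // type was incomplete; a diagnostic was already emitted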
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
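// Editor's sketch of how a parser-side caller might consume the result
// (surrounding names are illustrative):
//
//   NameClassification Classification =
//       S.ClassifyName(getCurScope(), SS, Name, NameLoc, Next);
//   switch (Classification.getKind()) {
//   case Sema::NC_Type:
//     // Annotate the token stream with Classification.getType().
//     break;
//   case Sema::NC_NonType: {
//     ExprResult E = S.ActOnNameClassifiedAsNonType(
//         getCurScope(), SS, Classification.getNonTypeDecl(), NameLoc, Next);
//     break;
//   }
//   // ... one case per NameClassificationKind above ...
//   }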
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
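// Editor's sketch of the intended pairing with the helper declared below,
// e.g. when the parser has parsed "E < ..." and suspects a missing
// 'template' keyword:
//
//   bool Dependent = false;
//   if (S.mightBeIntendedToBeTemplateName(E, Dependent))
//     S.diagnoseExprIntendedAsTemplateName(getCurScope(), E, LessLoc,
//                                          GreaterLoc);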
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
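// Editor's sketch: the NTCUK_* flags combine bitwise, so a context that both
// default-initializes and destroys the object would be checked with a call
// like:
//
//   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
//                           Sema::NTCUC_AutoVar,
//                           Sema::NTCUK_Init | Sema::NTCUK_Destruct);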
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when the parameters or return value of a function or
/// Objective-C method definition are passed by value and are larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
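// Editor's illustration: because these enumerator values feed a %select, a
// diagnostic worded along the lines of
//   "%select{declaration|definition|default argument|explicit specialization|"
//   "partial specialization}0 of %1 must be imported from ..."
// can be driven directly by passing the MissingImportKind as argument 0.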
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
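// Editor's worked example for getDiagnosticIndex(): for a special member,
// Comparison is None (== 0), so the index is simply the CXXSpecialMember
// value; for a defaulted comparison, SpecialMember is CXXInvalid (the highest
// special-member value, per the static_asserts), so comparison kinds occupy
// the indices immediately after it:
//
//   index(special member CSM)  == (unsigned)CSM
//   index(comparison Comp)     == (unsigned)CXXInvalid + (unsigned)Comp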
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike for C++, we actually parse the body and reject / error out on a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
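// Editor's worked example: priorities are additive, so an attribute both
// applied via '#pragma clang attribute' and inferred from another platform
// carries final priority 1 + 2 == 3, and is therefore overridden by an
// explicit attribute (0), a plain pragma attribute (1), or a plain inferred
// one (2), since lower final priority wins.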
/// Attribute merging methods. Return the merged attribute, or nullptr if no
/// new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - Used by all the assignment diagnostic functions to
// represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
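// Editor's sketch (illustrative use): converting a case label to the
// promoted switch-condition type, with the evaluated value returned in
// 'Value' on success:
//
//   llvm::APSInt Value;
//   ExprResult Converted = S.CheckConvertedConstantExpression(
//       CaseExpr, CondType, Value, Sema::CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return StmtError();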
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
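// Editor's sketch of a concrete diagnoser (the diagnostic ID here is
// hypothetical); a subclass supplies messages for each pure-virtual hook:
//
//   struct CondConvertDiagnoser : ICEConvertDiagnoser {
//     CondConvertDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_cond_requires_integer) << T;
//     }
//     // ... plus diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     // diagnoseAmbiguous, noteAmbiguous, and diagnoseConversion ...
//   } Diagnoser;
//   ExprResult Res =
//       S.PerformContextualImplicitConversion(Loc, CondExpr, Diagnoser);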
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
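/// For reference, the construct this resolves (illustrative only): the
/// target type picks one candidate out of the overload set.
/// \code
///   void f(int);
///   void f(double);
///   void (*fp)(int) = &f; // TargetType 'void (*)(int)' selects f(int)
/// \endcode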
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
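/// A minimal sketch of driving a lookup with one of these kinds (assumes a
/// Sema &S and a DeclarationName Name / SourceLocation Loc already in hand;
/// illustrative only):
/// \code
///   LookupResult R(S, Name, Loc, Sema::LookupOrdinaryName);
///   if (S.LookupName(R, S.getCurScope()) && R.isSingleResult())
///     NamedDecl *ND = R.getFoundDecl();
/// \endcode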
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
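/// The 'cooked' vs. 'raw' distinction in user code, for reference
/// (illustrative only; 'Meters' and 'Digits' are assumed user types): for
/// the literal 42.0_m a cooked operator receives the value 42.0, while a raw
/// operator receives the spelling "42.0".
/// \code
///   Meters operator""_m(long double v);        // cooked
///   Digits operator""_d(const char *spelling); // raw
/// \endcode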
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
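/// A minimal sketch of supplying a custom filter (assumes a Sema &S and an
/// Expr *E; illustrative only): rebuilt expressions that are not of record
/// type are rejected, so other correction combinations keep being tried.
/// \code
///   ExprResult Fixed = S.CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
///         if (Rebuilt->getType()->isRecordType())
///           return Rebuilt;
///         return ExprError();
///       });
/// \endcode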
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// that backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. On success it also returns the ivar's property through \p PDecl.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks the methods implemented in a
/// category against those implemented in its primary class and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter \p CheckTheOther is set, it then checks the other kind. If
/// no such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
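/// Typical use, as a minimal sketch (illustrative only): the compound scope
/// is entered for the lifetime of the object and finished automatically.
/// \code
///   {
///     Sema::CompoundScopeRAII BodyScope(S);
///     // ... process the statements of the compound statement ...
///   } // S.ActOnFinishOfCompoundStmt() runs here
/// \endcode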
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
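/// Unlike CompoundScopeRAII, the pop can be cancelled on the success path
/// (a minimal sketch; 'parseBody' is an assumed helper):
/// \code
///   FunctionScopeRAII PopOnError(S);
///   if (parseBody())
///     PopOnError.disable(); // success: the scope is popped elsewhere
/// \endcode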
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
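/// A minimal sketch of a capturability query in the non-diagnosing mode
/// (assumes a Sema &S, a VarDecl *Var, and a SourceLocation Loc;
/// illustrative only):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode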
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
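/// Illustrative sketch only: a caller that already holds its operands in AST
/// order can override the default API ordering. The atomic op enumerator and
/// the caller locals below are assumptions, not a call site from the source.
/// \code
///   ExprResult E = BuildAtomicExpr(CallRange, ExprRange, RParenLoc, Args,
///                                  AtomicExpr::AO__atomic_load_n,
///                                  AtomicArgumentOrder::AST);
/// \endcode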
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
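/// Illustrative sketch (names assumed): for __builtin_offsetof(S, a.b[1])
/// the parser would hand BuildBuiltinOffsetOf three components, one per
/// designator piece.
/// \code
///   OffsetOfComponent Comp;
///   Comp.isBrackets = false;     // '.a' and '.b' are field designators
///   Comp.U.IdentInfo = IdentA;   // IdentifierInfo* for 'a' (assumed)
///   // ... push Comp for 'a' and 'b', then:
///   Comp.isBrackets = true;      // '[1]' is an array designator
///   Comp.U.E = IndexExpr;        // the parsed index expression (assumed)
/// \endcode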
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
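/// A minimal sketch of how a caller might branch on the result ('Result' is
/// an assumed local, not a name from the source):
/// \code
///   switch (Result) {
///   case IER_Exists:       /* parse the dependent block normally */   break;
///   case IER_DoesNotExist: /* skip the dependent block */             break;
///   case IER_Dependent:    /* defer via BuildMSDependentExistsStmt */ break;
///   case IER_Error:        /* an error was already diagnosed */       break;
///   }
/// \endcode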
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
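/// A hedged example of the intended lookup (the enumerator and 'OpLoc' are
/// assumptions for illustration):
/// \code
///   QualType CCT = CheckComparisonCategoryType(
///       ComparisonCategoryType::StrongOrdering, OpLoc,
///       ComparisonCategoryUsage::OperatorInExpression);
///   if (CCT.isNull())
///     return QualType();  // lookup failed; a diagnostic was emitted
/// \endcode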
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
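/// A sketch of the intended collect-then-query pattern (assuming a Sema
/// instance 'SemaRef' and a called special member 'Method'):
/// \code
///   ImplicitExceptionSpecification Spec(SemaRef);
///   Spec.CalledDecl(Loc, Method);  // fold in each subobject's special member
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode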
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse a {dynamic,static,reinterpret,const}_cast expression.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the given qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
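/// A minimal sketch of the RAII usage (assuming a Sema instance 'SemaRef'
/// and a record 'RD' providing the class context):
/// \code
///   {
///     CXXThisScopeRAII ThisScope(SemaRef, RD, Qualifiers());
///     // 'this' may be used here, e.g. while parsing an exception spec
///   } // the previous CXXThisTypeOverride is restored on destruction
/// \endcode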
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
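/// Hedged sketch of a lookup over both scopes (caller locals such as
/// 'AllocType' and 'PlacementArgs' are assumed, as is the error path):
/// \code
///   FunctionDecl *OperatorNew = nullptr, *OperatorDelete = nullptr;
///   bool PassAlignment = false;
///   if (FindAllocationFunctions(StartLoc, Range, AFS_Both, AFS_Both,
///                               AllocType, /*IsArray=*/false, PassAlignment,
///                               PlacementArgs, OperatorNew, OperatorDelete))
///     return ExprError();  // assumed error path for illustration
/// \endcode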
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
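/// A sketch of typical construction while parsing 'foo::' ('FooII', 'FooLoc',
/// and 'CCLoc' are assumed parser locals):
/// \code
///   NestedNameSpecInfo IdInfo(FooII, FooLoc, CCLoc);
///   if (ActOnCXXNestedNameSpecifier(S, IdInfo, EnteringContext, SS))
///     return true;  // error already diagnosed
/// \endcode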
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed for the conversion, and IR
/// generation actually generates the real body of the function pointer
/// conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been, had a pair of the atomic constraints involved been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
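/// A hedged usage sketch (the 'IsSatisfied' member and the caller locals are
/// assumptions for illustration):
/// \code
///   ConstraintSatisfaction Satisfaction;
///   if (CheckConstraintSatisfaction(Template, AssociatedConstraints,
///                                   TemplateArgs, TemplateIDRange,
///                                   Satisfaction))
///     return true;  // error: satisfaction could not be checked
///   if (!Satisfaction.IsSatisfied)
///     DiagnoseUnsatisfiedConstraint(Satisfaction);
/// \endcode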
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
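/// (Illustrative: in C++20, a call such as 'g<int>(x)' where 'g' is entirely
/// undeclared is assumed to name a template so that it can later be found by
/// argument-dependent lookup.)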
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
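/// For example (illustrative):
/// \code
/// template<typename T> struct X { X(T); };
/// X(int) -> X<int>; // 'X' names a deduction guide here
/// \endcode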
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
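/// For example (illustrative):
/// \code
/// template<typename T, int N> void f(T (&)[N]);
/// int a[4];
/// f(a); // N is deduced from the array bound as 4
/// \endcode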
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
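/// For example (illustrative), this is the kind of error being diagnosed:
/// \code
/// template<typename... Ts> void f(Ts); // error: unexpanded pack 'Ts'
/// \endcode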
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
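/// For example (illustrative):
/// \code
/// template<typename... Ts> void f(Ts... args); // 'Ts...' is a pack expansion
/// \endcode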
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
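/// For example (illustrative), expansion must fail when packs expanded side
/// by side have different lengths:
/// \code
/// template<typename... As, typename... Bs>
/// void g(std::pair<As, Bs>...); // error if sizeof...(As) != sizeof...(Bs)
/// \endcode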
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
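/// For example (illustrative):
/// \code
/// template<typename T> void f(T, T);
/// // A call f(1, 2.0) deduces T = int and T = double: inconsistent.
/// \endcode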
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
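/// A sketch of the situation (illustrative):
/// \code
/// template<typename T> struct S;
/// template<typename T> struct S<const T> {};
/// S<int> s; // deducing 'const T' against non-const 'int' is underqualified
/// \endcode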
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
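/// (e.g., substituting 'int' into 'auto &' yields 'int &').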
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for \p auto in \p TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
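/// Illustrative sketch (hypothetical; S and the noteActiveEntity helper are
/// assumed names) of inspecting the innermost synthesis context on the stack:
/// \code
///   if (!S.CodeSynthesisContexts.empty()) {
///     const Sema::CodeSynthesisContext &Active = S.CodeSynthesisContexts.back();
///     if (Active.Kind == Sema::CodeSynthesisContext::TemplateInstantiation)
///       noteActiveEntity(Active.Entity, Active.PointOfInstantiation);
///   }
/// \endcode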
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
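/// Illustrative sketch (hypothetical; S, Pattern, TemplateArgs, and
/// NumExpansions are assumed names): substituting each element of a pack in
/// turn, with the previous index restored on scope exit:
/// \code
///   for (unsigned I = 0; I != NumExpansions; ++I) {
///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///     ExprResult Element = S.SubstExpr(Pattern, TemplateArgs);
///     if (Element.isInvalid())
///       return true; // substitution failed for element I
///   }
/// \endcode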
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the resulting object evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
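/// Illustrative sketch (hypothetical; S, PointOfInstantiation, and ClassDecl
/// are assumed names) of the usual construct-and-check pattern:
/// \code
///   InstantiatingTemplate Inst(S, PointOfInstantiation, ClassDecl);
///   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
///     return true; // depth limit hit (already diagnosed) or recursion detected
///   // ... perform the instantiation; ~InstantiatingTemplate pops the context.
/// \endcode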
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
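/// Illustrative sketch (hypothetical; S, E, and TemplateArgs are assumed
/// names) of probing whether an operation would succeed without emitting
/// diagnostics:
/// \code
///   Sema::SFINAETrap Trap(S);
///   ExprResult Probe = S.SubstExpr(E, TemplateArgs); // any fallible action
///   if (Trap.hasErrorOccurred() || Probe.isInvalid())
///     return false; // a substitution failure; nothing was diagnosed
/// \endcode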
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
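/// Illustrative sketch (hypothetical) of how the two eager-instantiation
/// scopes are typically paired around instantiating a definition: perform()
/// flushes the queues, and the destructors restore the saved state:
/// \code
///   Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
///   Sema::LocalEagerInstantiationScope LocalInstantiations(S);
///   // ... instantiate the definition, which may enqueue more work ...
///   LocalInstantiations.perform();   // local-class members first
///   GlobalInstantiations.perform();  // then vtables and the global queue
/// \endcode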
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
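/// Illustrative sketch (hypothetical; EPI and NumParams are assumed names):
/// \code
///   Sema::ExtParameterInfoBuilder InfoBuilder;
///   InfoBuilder.set(2, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
///   // The result is null when no parameter had interesting info.
/// \endcode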
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
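/// Illustrative sketch (hypothetical caller; S, ArgExprs, and TemplateArgs
/// are assumed names):
/// \code
///   SmallVector<Expr *, 8> ConvertedArgs;
///   if (S.SubstExprs(ArgExprs, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
///     return ExprError(); // substitution failed; diagnostics already emitted
/// \endcode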
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
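/// Illustrative Objective-C forms for each kind:
/// \code
///   [super init];        // ObjCSuperMessage
///   [myObject retain];   // ObjCInstanceMessage ('myObject' names a value)
///   [NSString string];   // ObjCClassMessage ('NSString' names a type)
/// \endcode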
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(LangOptions::FPRoundingModeKind);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
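// For illustration, user code (assumed; 'task' is a hypothetical coroutine
// return type) that exercises the routines above:
//   task<int> f() {
//     int v = co_await g();  // ActOnCoawaitExpr -> BuildResolvedCoawaitExpr
//     co_return v + 1;       // ActOnCoreturnStmt -> BuildCoreturnStmt
//   }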
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
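// For illustration, the OpenCL source pattern these checks guard (assumed
// user code):
//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
//   double d;   // valid only while cl_khr_fp64 is enabled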
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
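/// Opaque stack of data-sharing attributes for variables, created by
/// InitDataSharingAttributesStack() and destroyed by
/// DestroyDataSharingAttributesStack().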
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
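/// For illustration (assumed user code):
/// \code
///   #pragma omp parallel for
///   for (int i = 0; i < n; ++i)   // 'i' is made private by default
///     work(i);
/// \endcode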
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
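// For illustration, a '#pragma omp declare reduction' that the hooks above
// parse (assumed user code):
//   #pragma omp declare reduction(merge : std::vector<int> :              \
//       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))      \
//       initializer(omp_priv = omp_orig)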
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
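// For illustration, a '#pragma omp declare mapper' that the hooks above
// parse (assumed user code):
//   struct Vec { int len; double *data; };
//   #pragma omp declare mapper(id : Vec v) map(v.len, v.data[0:v.len])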
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
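/// For illustration (assumed user code):
/// \code
///   #pragma omp declare target
///   int on_device;   // declared while DeclareTargetNestingLevel > 0
///   #pragma omp end declare target
/// \endcode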
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function/variant function are not compatible with
/// the pragma; otherwise, a pair of the original function and the variant
/// reference expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
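// For illustration, a '#pragma omp declare variant' that the hooks above
// check (assumed user code):
//   void gpu_saxpy(int n, float a, float *x, float *y);
//   #pragma omp declare variant(gpu_saxpy) match(device = {kind(gpu)})
//   void saxpy(int n, float a, float *x, float *y);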
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation DepLinMapLastLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
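// For illustration, the source forms the kinds above classify (assumed):
//   int i = 0.5;              // CCK_ImplicitConversion
//   (int)0.5                  // CCK_CStyleCast
//   int(0.5)                  // CCK_FunctionalCast
//   static_cast<int>(0.5)     // CCK_OtherCast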
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
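// For illustration (assumed user code):
//   void log(const char *fmt, ...);
//   log("%f", 1.0f);   // the float argument is promoted to double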
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers of different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
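// For illustration, two of the classifications above (assumed user code,
// C mode):
//   void *v = 0; int i = v;           // PointerToInt, accepted as extension
//   char **p = 0; const char **q = p; // IncompatibleNestedPointerQualifiers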
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
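// Example (an informal sketch): for 'const Base &R = D;', where 'D' is an
// lvalue of a class derived from Base, this returns Ref_Compatible and, if
// Conv is non-null, roughly sets the DerivedToBase and Qualification bits.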
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
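// Example (an informal sketch): for 'if constexpr (sizeof(void *) == 8)' on a
// 64-bit target, the ConditionResult built by ActOnCondition has
// getKnownValue() == true; for a value-dependent condition it yields None.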
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or ExprError() if there were errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the checked expression
/// on success, or ExprError() on failure.
/// Can optionally return the value of the expression.
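///
/// Example usage (an informal sketch; `SomeDiagID` stands in for a suitable
/// diagnostic ID):
///
/// llvm::APSInt Value;
/// if (VerifyIntegerConstantExpression(E, &Value, SomeDiagID).isInvalid())
/// return ExprError();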
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the checked width expression on success, or ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
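// Example (an informal sketch): a __host__ caller invoking a __device__
// callee is CFP_Never; a __host__ __device__ caller invoking a __device__
// callee while compiling for the host is CFP_WrongSide; a device-to-device
// call is CFP_Native.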
/// Identifies the relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee the target function.
///
/// \returns the preference value for the particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds the function in \p Matches with the highest calling priority
/// from the \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In case of
/// error, emits an appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const Expr *CoprocArg, bool WantCDE);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
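// Example (an informal sketch; `foo`, `FooII`, and `MagicValue` are
// placeholders): handling
// static const int my_tag __attribute__((type_tag_for_datatype(foo, int)));
// ends up registering the tag roughly as
// RegisterTypeTagForDatatype(FooII, MagicValue, Context.IntTy,
// /*LayoutCompatible=*/false, /*MustBeNull=*/false);
// where the magic value comes from the variable's initializer.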
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
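// Example: completing at 'f(a, b, <cursor>)' where f takes two parameters
// gives NumParams = 2, NumArgs = 2 and PartialOverloading = true, so the
// trailing comma makes us treat the call as having 3 arguments and
// TooManyArguments returns true.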
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes the expression from
/// the set. This is used when we do not want to diagnose such misaligned
/// access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
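// Example (an informal sketch of a caller-supplied action):
// RefersToMemberWithReducedAlignment(
// E, [&](Expr *E, RecordDecl *RD, FieldDecl *FD, CharUnits A) {
// Diag(E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
// << FD << RD;
// });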
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
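// Example (an informal sketch of the usual RAII pattern):
// {
// EnterExpressionEvaluationContext Unevaluated(
// Actions, Sema::ExpressionEvaluationContext::Unevaluated);
// // ... parse or build an expression in the unevaluated context ...
// } // the context is popped when 'Unevaluated' goes out of scope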
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
ConvPoolLayer.c | /*
* ConvPoolLayer.c
* Francesco Conti <f.conti@unibo.it>
*
* Copyright (C) 2016 ETH Zurich, University of Bologna
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#include "linalg.h"
#include "tiling.h"
#include "ConvPoolLayer.h"
#ifdef CCN_HWCE_ACCEL
#include "hwce.h"
#endif
#ifdef CCN_ENCRYPT
#include "encryption.h"
#endif
#ifdef IPC_LINALG
#include "perf_monitor.h"
#endif
#ifndef NULL
#define NULL ((void *) 0)
#endif
#ifndef PULP_CHIP
#define PULP_CHIP -1
#endif
unsigned int dmaId = -1;
#ifdef CCN_TILING_LESSTIME
#define _conv_tiling_init(); \
unsigned char (*tile_grid_nof)[layer->ntile_nif][layer->ntile_h][layer->ntile_w] = (unsigned char (*)[layer->ntile_nif][layer->ntile_h][layer->ntile_w]) layer->tile_grid_nof; \
unsigned char (*tile_grid_nif)[layer->ntile_nif][layer->ntile_h][layer->ntile_w] = (unsigned char (*)[layer->ntile_nif][layer->ntile_h][layer->ntile_w]) layer->tile_grid_nif; \
unsigned char (*tile_grid_h) [layer->ntile_nif][layer->ntile_h][layer->ntile_w] = (unsigned char (*)[layer->ntile_nif][layer->ntile_h][layer->ntile_w]) layer->tile_grid_h; \
unsigned char (*tile_grid_w) [layer->ntile_nif][layer->ntile_h][layer->ntile_w] = (unsigned char (*)[layer->ntile_nif][layer->ntile_h][layer->ntile_w]) layer->tile_grid_w; \
int _fs = layer->filter_size; \
int _nof = tile_grid_nof[aa][bb][ii][jj]; \
int _nif = tile_grid_nif[aa][bb][ii][jj]; \
int _h = tile_grid_h [aa][bb][ii][jj]; \
int _w = tile_grid_w [aa][bb][ii][jj]; \
int _oh = _h-_fs+1; \
int _ow = _w-_fs+1;
#else /* ~CCN_TILING_LESSTIME */
#define _conv_tiling_init(); \
int _fs = layer->filter_size; \
int _nof = (aa < layer->ntile_full_nof) ? layer->tiling_max_nof : layer->tlast_nof; \
int _nif = (bb < layer->ntile_full_nif) ? layer->tiling_max_nif : layer->tlast_nif; \
int _h = (ii < layer->ntile_full_h ) ? layer->tiling_max_height : layer->tlast_h; \
int _w = (jj < layer->ntile_full_w ) ? layer->tiling_max_width : layer->tlast_w; \
int _oh = _h-_fs+1; \
int _ow = _w-_fs+1;
#endif /* ~CCN_TILING_LESSTIME */
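// Note: CCN_TILING_LESSTIME trades memory for time. Per-tile sizes are
// precomputed in the tile grids, so _conv_tiling_init() reduces to four table
// lookups; the fallback variant recomputes each size from the tile indices on
// every call, saving the grid storage at the cost of a few comparisons.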
#define _conv_notiling_init(); \
int _fs = layer->filter_size; \
int _nof = layer->n_out_feat; \
int _nif = layer->n_in_feat; \
int _h = layer->height; \
int _w = layer->width; \
int _oh = _h-_fs+1; \
int _ow = _w-_fs+1;
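// In both cases the output tile shrinks by the filter size: _oh = _h-_fs+1
// (e.g. _h = 32, _fs = 5 -> _oh = 28), i.e. a "valid" convolution, no padding.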
/**
* Allocates a new ConvPoolLayer data structure and its fields (weight, bias,
* output feature maps).
*
* @return a pointer to the new ConvPoolLayer data structure.
*
* @param n_out_feat
* the number of output feature maps.
* @param n_in_feat
* the number of input feature maps.
* @param filter_size
* the size of the filters.
* @param height
* the height of the input feature maps.
* @param width
* the width of the input feature maps.
* @param activation
* 1 if activation is tanh, 0 if no activation.
* @param *x
* a *mandatory* pointer to the input feature maps.
* @param *y
* an *optional* pointer to the already-allocated output feature maps. If
* NULL, ConvPoolLayer_new() will allocate y automatically.
*/
ConvPoolLayer *ConvPoolLayer_new(
#ifdef CCN_NOALLOC
ConvPoolLayer *layer,
#endif /* CCN_NOALLOC */
const char *name,
data_t *w,
data_t *b,
data_t *x,
data_t *y,
data_t *loc_x0,
data_t *loc_x1,
data_t *loc_y0,
data_t *loc_y1,
data_t *loc_y2,
data_t *loc_y3,
data_t *loc_w0,
data_t *loc_w1,
int n_out_feat,
int n_in_feat,
int height,
int width,
int filter_size,
int activation,
int pool_size,
int parallel_type,
int tiling_max_nof,
int tiling_max_nif,
int tiling_max_height,
int tiling_max_width,
unsigned qf
) {
#ifndef CCN_NOALLOC
// build ConvPoolLayer
ConvPoolLayer *layer;
layer = ccn_malloc(sizeof(ConvPoolLayer));
#endif /* ifndef CCN_NOALLOC */
layer->name = name;
layer->n_in_feat = n_in_feat;
layer->n_out_feat = n_out_feat;
layer->filter_size = filter_size;
layer->height = height;
layer->width = width;
layer->activation = activation;
layer->parallel_type = parallel_type;
layer->w = w;
layer->b = b;
layer->x = x;
layer->y = y;
layer->qf = qf;
layer->pool_size = pool_size;
#ifndef CCN_CACHE
layer->loc_x0 = loc_x0;
layer->loc_y0 = loc_y0;
layer->loc_x1 = loc_x1;
layer->loc_y1 = loc_y1;
layer->loc_y2 = loc_y2;
layer->loc_y_tmp = loc_y3;
layer->loc_w0 = loc_w0;
layer->loc_w1 = loc_w1;
layer->loc_b = (data_t *) ccn_malloc(sizeof(data_t)*tiling_max_nof);
#endif /* ifndef CCN_CACHE */
layer->tiling_max_nof = tiling_max_nof;
layer->tiling_max_nif = tiling_max_nif;
layer->tiling_max_height = tiling_max_height;
layer->tiling_max_width = tiling_max_width;
#ifdef CCN_TILING
// define and record the number of tiles
int ntile_nof = (n_out_feat % tiling_max_nof ) ? n_out_feat / tiling_max_nof + 1 : n_out_feat / tiling_max_nof;
int ntile_nif = (n_in_feat % tiling_max_nif ) ? n_in_feat / tiling_max_nif + 1 : n_in_feat / tiling_max_nif;
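// (i.e. ceiling division: e.g. n_out_feat = 10, tiling_max_nof = 4 -> ntile_nof = 3)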
// a little more complicated for H and W tiles: the last tile counts for (tiling_max_height-_fs+1)+height%(tiling_max_height-_fs+1),
// then every other tile only for tiling_max_height-_fs+1
int ntile_h;
int ntile_w;
int tlast_h;
int tlast_w;
  if(height <= tiling_max_height) {
    ntile_h = 1;
    tlast_h = height; // single tile spans the whole height; also keeps tlast_h initialized for the grid fill below
  }
else {
// e.g. tiling_max_height = 7, height = 14, _fs = 5
// e.g. tlast = 3+2 = 5
tlast_h = (tiling_max_height-layer->filter_size+1) + height%(tiling_max_height-layer->filter_size+1);
// e.g. ntile = 1 + 9/3 = 1+3 = 4
ntile_h = 1 + (height-tlast_h) / (tiling_max_height-layer->filter_size+1);
}
  if(width <= tiling_max_width) {
    ntile_w = 1;
    tlast_w = width; // single tile spans the whole width; also keeps tlast_w initialized for the grid fill below
  }
else {
// e.g. tiling_max_width = 20, width = 32, _fs = 5
// e.g. tlast = 16+0 = 16
tlast_w = (tiling_max_width-layer->filter_size+1) + width%(tiling_max_width-layer->filter_size+1);
// e.g. ntile = 1 + 16/16 = 1+1 = 2
ntile_w = 1 + (width-tlast_w) / (tiling_max_width-layer->filter_size+1);
}
layer->ntile_nof = ntile_nof;
layer->ntile_nif = ntile_nif;
layer->ntile_h = ntile_h;
layer->ntile_w = ntile_w;
#ifdef CCN_TILING_LESSMEM
layer->tlast_nof = n_out_feat % tiling_max_nof;
layer->tlast_nif = n_in_feat % tiling_max_nif;
layer->tlast_h = tlast_h;
layer->tlast_w = tlast_w;
layer->ntile_full_nof = ntile_nof;
layer->ntile_full_nif = ntile_nif;
layer->ntile_full_h = ntile_h;
layer->ntile_full_w = ntile_w;
#else /* ~CCN_TILING_LESSMEM */
// allocate the tile grid in a flat fashion
layer->tile_grid_nof = ccn_malloc(sizeof(unsigned char)*(ntile_nof+NB_PIPE_STAGE-1)*ntile_nif*ntile_h*ntile_w);
layer->tile_grid_nif = ccn_malloc(sizeof(unsigned char)*(ntile_nof+NB_PIPE_STAGE-1)*ntile_nif*ntile_h*ntile_w);
layer->tile_grid_h = ccn_malloc(sizeof(unsigned char)*(ntile_nof+NB_PIPE_STAGE-1)*ntile_nif*ntile_h*ntile_w);
layer->tile_grid_w = ccn_malloc(sizeof(unsigned char)*(ntile_nof+NB_PIPE_STAGE-1)*ntile_nif*ntile_h*ntile_w);
// cast the tile grid to a 4-dimensional array
unsigned char (*tile_grid_nof)[ntile_nif][ntile_h][ntile_w] = layer->tile_grid_nof;
unsigned char (*tile_grid_nif)[ntile_nif][ntile_h][ntile_w] = layer->tile_grid_nif;
unsigned char (*tile_grid_h) [ntile_nif][ntile_h][ntile_w] = layer->tile_grid_h;
unsigned char (*tile_grid_w) [ntile_nif][ntile_h][ntile_w] = layer->tile_grid_w;
#endif /* ~CCN_TILING_LESSMEM */
// fill in the tile grid
int aa, bb, ii, jj;
for(aa=0; aa<layer->ntile_nof; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
for(ii=0; ii<layer->ntile_h; ii++) {
for(jj=0; jj<layer->ntile_w; jj++) {
#ifdef CCN_TILING_LESSTIME
if(jj*(tiling_max_width-layer->filter_size+1) > width-tiling_max_width) {
tile_grid_w[aa][bb][ii][jj] = (unsigned char) tlast_w;
}
else {
tile_grid_w[aa][bb][ii][jj] = (unsigned char) tiling_max_width;
}
if(ii*(tiling_max_height-layer->filter_size+1) > height-tiling_max_height) {
tile_grid_h[aa][bb][ii][jj] = (unsigned char) tlast_h;
}
else {
tile_grid_h[aa][bb][ii][jj] = (unsigned char) tiling_max_height;
}
if(bb*tiling_max_nif > n_in_feat-tiling_max_nif) {
tile_grid_nif[aa][bb][ii][jj] = (unsigned char) n_in_feat % tiling_max_nif;
}
else {
tile_grid_nif[aa][bb][ii][jj] = (unsigned char) tiling_max_nif;
}
if(aa*tiling_max_nof > n_out_feat-tiling_max_nof) {
tile_grid_nof[aa][bb][ii][jj] = (unsigned char) n_out_feat % tiling_max_nof;
}
else {
tile_grid_nof[aa][bb][ii][jj] = (unsigned char) tiling_max_nof;
}
#else /* ~CCN_TILING_LESSTIME */
if(jj*(tiling_max_width-layer->filter_size+1) > width-tiling_max_width) {
layer->ntile_full_w = jj;
}
if(ii*(tiling_max_height-layer->filter_size+1) > height-tiling_max_height) {
layer->ntile_full_h = ii;
}
if(bb*tiling_max_nif > n_in_feat-tiling_max_nif) {
layer->ntile_full_nif = bb;
}
if(aa*tiling_max_nof > n_out_feat-tiling_max_nof) {
layer->ntile_full_nof = aa;
}
#endif /* ~CCN_TILING_LESSTIME */
}
}
}
}
#ifdef CCN_TILING_LESSTIME
for(aa=layer->ntile_nof; aa<layer->ntile_nof+NB_PIPE_STAGE-1; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
for(ii=0; ii<layer->ntile_h; ii++) {
for(jj=0; jj<layer->ntile_w; jj++) {
          tile_grid_w  [aa][bb][ii][jj] = 0;
          tile_grid_h  [aa][bb][ii][jj] = 0;
          tile_grid_nif[aa][bb][ii][jj] = 0;
          tile_grid_nof[aa][bb][ii][jj] = 0;
}
}
}
}
#endif /* CCN_TILING_LESSTIME */
#else /* ~CCN_TILING */
// no tile grid
int ntile_nof = n_out_feat;
int ntile_nif = n_in_feat;
int ntile_h = height;
int ntile_w = width;
layer->ntile_nof = ntile_nof;
layer->ntile_nif = ntile_nif;
layer->ntile_h = ntile_h;
layer->ntile_w = ntile_w;
#endif /* ~CCN_TILING */
#ifdef TILING_DEBUG
printf("[ConvPoolLayer %s] NOF grid:\n", layer->name);
for(aa=0; aa<layer->ntile_nof; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
printf("aa=%d bb=%d\n", aa, bb);
for(ii=0; ii<layer->ntile_h; ii++) {
printf(" ");
for(jj=0; jj<layer->ntile_w; jj++) {
printf("%d ", tile_grid_nof[aa][bb][ii][jj]);
}
printf("\n");
}
}
}
printf("[ConvPoolLayer %s] NIF grid:\n", layer->name);
for(aa=0; aa<layer->ntile_nof; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
printf("aa=%d bb=%d\n", aa, bb);
for(ii=0; ii<layer->ntile_h; ii++) {
printf(" ");
for(jj=0; jj<layer->ntile_w; jj++) {
printf("%d ", tile_grid_nif[aa][bb][ii][jj]);
}
printf("\n");
}
}
}
printf("[ConvPoolLayer %s] H grid:\n", layer->name);
for(aa=0; aa<layer->ntile_nof; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
printf("aa=%d bb=%d\n", aa, bb);
for(ii=0; ii<layer->ntile_h; ii++) {
printf(" ");
for(jj=0; jj<layer->ntile_w; jj++) {
printf("%d ", tile_grid_h[aa][bb][ii][jj]);
}
printf("\n");
}
}
}
printf("[ConvPoolLayer %s] W grid:\n", layer->name);
for(aa=0; aa<layer->ntile_nof; aa++) {
for(bb=0; bb<layer->ntile_nif; bb++) {
printf("aa=%d bb=%d\n", aa, bb);
for(ii=0; ii<layer->ntile_h; ii++) {
printf(" ");
for(jj=0; jj<layer->ntile_w; jj++) {
printf("%d ", tile_grid_w[aa][bb][ii][jj]);
}
printf("\n");
}
}
}
#endif /* TILING_DEBUG */
#ifdef CCN_PULP_HWCE
#if PULP_CHIP==CHIP_FULMINE
hwce_enable();
#endif /* PULP_CHIP==CHIP_FULMINE */
#endif /* CCN_PULP_HWCE */
#ifdef CCN_HWCE_ACCEL
hwce_config_init();
#endif
return layer;
}
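/* Illustrative usage sketch (not from the original source; compiled out).
 * All buffers (w, b, x, y, loc_*) and the ACTIVATION_* / PARALLEL_* constants
 * below are assumptions standing in for what a real network description
 * would provide. */
#if 0
ConvPoolLayer *l = ConvPoolLayer_new(
  "conv1",
  w, b, x, y,                     // weights, bias, input, output (L2)
  loc_x0, loc_x1,                 // double-buffered local input
  loc_y0, loc_y1, loc_y2, loc_y3, // local output buffers
  loc_w0, loc_w1,                 // double-buffered local weights
  32, 16,                         // n_out_feat, n_in_feat
  32, 32,                         // height, width
  5,                              // filter_size (5x5)
  ACTIVATION_TANH,                // activation
  2,                              // pool_size (2x2 max-pooling)
  PARALLEL_FEAT,                  // parallel_type (assumed constant)
  8, 8, 16, 32,                   // tiling_max_{nof,nif,height,width}
  13                              // qf: fixed-point fractional bits (assumed)
);
ConvPoolLayer_exec(l);
ConvPoolLayer_delete(l);
#endif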
void ConvPoolLayer_delete(
ConvPoolLayer *layer
) {
#ifndef CCN_CACHE
ccn_free(layer->loc_w0);
ccn_free(layer->loc_w1);
ccn_free(layer->loc_b);
#endif /* ~CCN_CACHE */
#ifdef CCN_TILING
ccn_free(layer->tile_grid_nof);
ccn_free(layer->tile_grid_nif);
ccn_free(layer->tile_grid_h);
ccn_free(layer->tile_grid_w);
#endif /* ~CCN_TILING */
ccn_free(layer);
}
static void ConvPoolLayer_pipe_fe(
ConvPoolLayer *layer,
int aa,
int bb,
int ii,
int jj
) {
#ifdef CCN_CACHE
return;
#endif
// if aa is -1, it means that this is the last tile (and bb, ii, jj also = -1)
if(aa==-1)
return;
#ifdef FETCH_PROFILE
perf_enable_all();
perf_reset();
perf_start();
#endif /* FETCH_PROFILE */
{
_conv_tiling_init()
// x strides are h-fs+1, w-fs+1 due to tile overlap
data_t *l2_x = ccn_get_tile_3d(
layer->x,
bb, ii, jj,
layer->tiling_max_nif, layer->tiling_max_height, layer->tiling_max_width,
layer->height, layer->width,
0, _fs-1, _fs-1
);
data_t *l2_y = ccn_get_tile_3d(
layer->y,
aa, ii, jj,
layer->tiling_max_nof, layer->tiling_max_height-_fs+1, layer->tiling_max_width-_fs+1,
layer->height-_fs+1, layer->width-_fs+1,
0, 0, 0
);
#if PULP_CHIP == CHIP_MIA || PULP_CHIP == CHIP_PULP3 || PULP_CHIP == CHIP_FULMINE || PULP_CHIP == CHIP_HONEY
data_t *l2_W = ccn_get_tile_2d(
layer->w,
aa, bb,
layer->tiling_max_nof*MULTIPLE8(_fs*_fs), layer->tiling_max_nif*MULTIPLE8(_fs*_fs),
layer->n_in_feat
);
#else /* ~CHIP_MIA && ~CHIP_PULP3 && ~CHIP_FULMINE && ~CHIP_HONEY */
data_t *l2_W = ccn_get_tile_2d(
layer->w,
aa, bb,
layer->tiling_max_nof*_fs*_fs, layer->tiling_max_nif*_fs*_fs,
layer->n_in_feat
);
#endif /* ~CHIP_MIA && ~CHIP_PULP3 && ~CHIP_FULMINE && ~CHIP_HONEY */
#ifdef CCN_TILING_3D
/* with no additional assumptions, the tiling grid is three-dimensional */
// X tile copy-in
ccn_memcpy_async_3d(
layer->loc_x_fe, // pointers
l2_x,
_nif, // sizes
_h,
_w*sizeof(data_t),
_h, // local strides
_w*sizeof(data_t),
layer->height, // remote strides
layer->width*sizeof(data_t)
);
#endif /* CCN_TILING_3D */
#ifdef CCN_TILING_2D
/* Assuming that tiles are internally contiguous in the j feature map dimension,
the tiling grid is two-dimensional.
Moreover, _w=layer->width */
// X tile copy-in
ccn_memcpy_async_2d(
layer->loc_x_fe, // pointers
l2_x,
_nif, // sizes
_h*_w*sizeof(data_t),
_h*_w*sizeof(data_t), // local strides
layer->height*layer->width*sizeof(data_t) // remote strides
);
#endif /* CCN_TILING_2D */
#ifdef CCN_TILING_1D
/* Assuming that tiles are internally contiguous in the i,j feature map dimensions,
the tiling grid is one-dimensional.
Moreover, _h=layer->height,_w=layer->width */
// X tile copy-in
ccn_memcpy_async(
layer->loc_x_fe, // pointers
l2_x,
_nif*_h*_w*sizeof(data_t)
);
#endif /* CCN_TILING_1D */
// W copy-in
#if PULP_CHIP == CHIP_MIA || PULP_CHIP == CHIP_PULP3 || PULP_CHIP == CHIP_FULMINE || PULP_CHIP == CHIP_HONEY
for(int a=0; a<_nof; a++) {
for(int b=0; b<_nif; b++) {
ccn_memcpy_async(
layer->loc_w_fe + a*_nif*MULTIPLE4(_fs*_fs) + b*MULTIPLE4(_fs*_fs),
l2_W + a*layer->n_in_feat*MULTIPLE8(_fs*_fs) + b*MULTIPLE8(_fs*_fs),
sizeof(data_t)*_fs*_fs
);
}
}
#else /* ~CHIP_MIA && ~CHIP_PULP3 && ~CHIP_FULMINE && ~CHIP_HONEY */
if(layer->parallel_type != PARALLEL_HWCE) {
for(int a=0; a<_nof; a++) {
for(int b=0; b<_nif; b++) {
ccn_memcpy_async(
layer->loc_w_fe + a*_nif*_fs*_fs + b*_fs*_fs,
l2_W + a*layer->n_in_feat*_fs*_fs + b*_fs*_fs,
sizeof(data_t)*_fs*_fs
);
}
}
}
else {
for(int a=0; a<_nof; a++) {
for(int b=0; b<_nif; b++) {
ccn_memcpy_async(
layer->loc_w_fe + a*_nif*MULTIPLE4(_fs*_fs) + b*MULTIPLE4(_fs*_fs),
l2_W + a*layer->n_in_feat*_fs*_fs + b*_fs*_fs,
sizeof(data_t)*_fs*_fs
);
}
}
}
#endif /* ~CHIP_MIA && ~CHIP_PULP3 && ~CHIP_FULMINE && ~CHIP_HONEY */
#ifdef FAKEDMA
// ccn_memcpy_wait();
#endif /* FAKEDMA */
#ifdef FETCH_CHECKSUM
int32_t sum_x = 0;
int32_t sum_W = 0;
for(int i=0; i<_nif*_h*_w; i++) {
sum_x += layer->loc_x_fe[i];
}
if(layer->parallel_type == PARALLEL_HWCE) {
for(int a=0; a<_nof; a++) {
for(int b=0; b<_nif; b++) {
for(int i=0; i<_fs*_fs; i++) {
sum_W += layer->loc_w_fe[a*_nif*MULTIPLE4(_fs*_fs) + b*MULTIPLE4(_fs*_fs) + i];
}
}
}
}
else {
for(int i=0; i<_nof*_nif*_fs*_fs; i++) {
sum_W += layer->loc_w_fe[i];
}
}
printf("[ConvPoolLayer %s] Fetch checksum %d,%d,%d,%d: x=%d W=%d\n", layer->name, aa, bb, ii, jj, sum_x, sum_W);
#endif /* FETCH_CHECKSUM */
}
#ifdef FETCH_PROFILE
perf_stop();
int t0 = perf_get_cycles();
printf("[ConvPoolLayer %s] Fetch profiling: %d\n", layer->name, t0);
#endif /* FETCH_PROFILE */
}
static void ConvPoolLayer_pipe_ex(
ConvPoolLayer *layer,
int aa,
int bb,
int ii,
int jj
) {
// if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1)
if(aa==-1)
return;
#ifdef EXECUTE_PROFILE
perf_enable_all();
perf_reset();
perf_start();
#endif /* EXECUTE_PROFILE */
// #pragma omp single nowait
{
#ifdef INTERM_CHECKSUM
int print_flag = 0;
#endif
#ifdef CCN_TILING
_conv_tiling_init()
#else /* ~CCN_TILING */
_conv_notiling_init()
#endif /* ~CCN_TILING */
#ifndef CCN_CACHE
data_t *_x = layer->loc_x_ex;
// #define USE_TMP_BUFFER
#ifdef USE_TMP_BUFFER
data_t *_y;
data_t *_y2 = layer->loc_y_ex;
if(bb == layer->ntile_nif-1) {
_y = layer->loc_y_tmp;
}
else {
_y = layer->loc_y_ex;
}
#else /* USE_TMP_BUFFER */
data_t *_y = layer->loc_y_ex;
#endif /* USE_TMP_BUFFER */
data_t *_W = layer->loc_w_ex;
#ifndef CCN_DOUBLEBUF
// wait for the end of the fetch stage if not doing double buffering
// ccn_memcpy_wait();
// #pragma omp barrier
#endif /* ~CCN_DOUBLEBUF */
#else /* CCN_CACHE */
data_t *_x = ccn_get_tile_3d(
layer->x,
bb, ii, jj,
layer->tiling_max_nif, layer->tiling_max_height, layer->tiling_max_width,
layer->height, layer->width,
0, _fs-1, _fs-1
);
data_t *_y = ccn_get_tile_3d(
layer->y,
aa, ii, jj,
layer->tiling_max_nof, layer->tiling_max_height-_fs+1, layer->tiling_max_width-_fs+1,
layer->height-_fs+1, layer->width-_fs+1,
0, 0, 0
);
// we assume weights to be contiguous
data_t *_W = ccn_get_tile_2d(
layer->w,
aa, bb,
layer->tiling_max_nof, layer->tiling_max_nif,
layer->n_in_feat*_fs*_fs
)
#endif /* CCN_CACHE */
// loop over output features
for(int a=0; a<_nof; a++) {
// #pragma omp barrier
// y_a[i,j] = b_a for every output feature a, pixel (i,j)
if(bb == 0) {
data_t _b = layer->b[aa*layer->tiling_max_nof+a];
// #pragma omp parallel for
for(int i=0; i<_oh*_ow; i++) {
_y[a*_oh*_ow+i] = _b;
}
}
// convolution "core"
#ifndef NOCOMPUTATION
#ifndef CCN_CACHE
#ifdef CCN_PULP_HWCE
if(layer->parallel_type == PARALLEL_HWCE)
linalg_2dconv_hwce(_W, _x, _y, _h, _w, _fs, a, _nif, layer->parallel_type, layer->qf);
else
linalg_2dconv(_W, _x, _y, _h, _w, _fs, a, _nif, layer->parallel_type, layer->qf);
#else /* ~CCN_PULP_HWCE */
linalg_2dconv(_W, _x, _y, _h, _w, _fs, a, _nif, layer->parallel_type, layer->qf);
#endif /* ~CCN_PULP_HWCE */
#else /* CCN_CACHE */
linalg_2dconv(_W, _x, _y, layer->height, layer->width, _fs, a, layer->n_in_feat, layer->parallel_type, layer->qf);
#endif /* CCN_CACHE */
#endif /* NOCOMPUTATION */
#ifdef DETAILED_DEBUG
#pragma omp master
{
for(int i=0; i<_oh; i++) {
for(int j=0; j<_ow; j++) {
printf("(%d,%d): %04x\n", i, j, _y[a*_oh*_ow+i*_ow+j] & 0xffff);
}
}
}
#pragma omp barrier
#endif /* DETAILED_DEBUG */
if(bb == layer->ntile_nif-1) {
int _ps = layer->pool_size;
int _oph, _opw;
if(_ps == 2) {
_oph = _oh >> 1;
_opw = _ow >> 1;
}
else if(_ps == 4) {
_oph = _oh >> 2;
_opw = _ow >> 2;
}
else {
_oph = _oh / _ps;
_opw = _ow / _ps;
}
// #pragma omp parallel \
// firstprivate(_y2,_ps)
// {
if(layer->activation == ACTIVATION_TANH) {
// #pragma omp for \
// collapse(2)
for (int i=0; i<_oh; i++) {
for (int j=0; j<_ow; j++) {
_y[a*_oh*_ow+i*_ow+j] = ccn_tanh(_y[a*_oh*_ow+i*_ow+j]);
}
}
}
else if(layer->activation == ACTIVATION_RELU) {
// #pragma omp for \
// collapse(2)
for (int i=0; i<_oh; i++) {
for (int j=0; j<_ow; j++) {
_y[a*_oh*_ow+i*_ow+j] = ccn_relu(_y[a*_oh*_ow+i*_ow+j]);
}
}
}
// #pragma omp for \
// collapse(2)
for(int i=0; i<_oph; i++) {
for(int j=0; j<_opw; j++) {
data_t max = -DATA_T_MAX;
for(int i1=0; i1<_ps; i1++) {
for(int j1=0; j1<_ps; j1++) {
data_t xtmp = _y[((a*_oh)+(i*_ps+i1))*_ow+(j*_ps+j1)];
if(xtmp > max)
max = xtmp;
}
}
#ifdef USE_TMP_BUFFER
_y2[a*_oph*_opw+i*_opw+j] = max;
#else /* USE_TMP_BUFFER */
_y[a*_oph*_opw+i*_opw+j] = max;
#endif /* USE_TMP_BUFFER */
}
}
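        // note: in the default (non-USE_TMP_BUFFER) path the pooled write above is
        // in-place over _y; this is safe because for each feature a the destination
        // index a*_oph*_opw+i*_opw+j never exceeds the smallest source index of its
        // _ps x _ps window, so no pixel is overwritten before it has been read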
// }
}
#ifdef CCN_ENCRYPT
#ifdef CCN_ENCRYPT_HWCRYPT
hwcrypt_enable();
ccn_encrypt_aes_xts_hwcrypt_config();
ccn_encrypt_aes_xts_hwcrypt(_y, _y, _nof*_oh*_ow >> 1); // number of 32-bit words
#ifdef CCN_HWCE_ACCEL
hwce_enable();
#endif /* CCN_HWCE_ACCEL */
#else /* ~CCN_ENCRYPT_HWCRYPT */
ccn_encrypt_aes_xts(_y, _y, _nof*_oh*_ow*sizeof(data_t)); // number of bytes
#endif /* ~CCN_ENCRYPT_HWCRYPT */
#endif /* CCN_ENCRYPT */
#ifdef INTERM_CHECKSUM
// #pragma omp barrier
// #pragma omp master
{
int i, sum=0;
printf("[ConvPoolLayer %s] Intermediate checksum tile %d,%d,%d,%d, a=%d: ", layer->name, aa,bb,ii,jj, print_flag);
sum=0;
data_t *xt = _x + a*_nif*_h*_w;
for(i=0; i<_nif*_h*_w; i++){
sum+=xt[i]; // FIXME: it should be _x, not xt. but if i do it here => WRONG CHECKSUM (BIG MISTERY)
}
printf("xsum=%d, ", sum);
sum=0;
data_t *wt = _W + a*_nif*MULTIPLE4(_fs*_fs);
for(i=0; i<_nif*_fs*_fs; i++) {
sum+=wt[i];
}
printf("wsum=%d, ", sum);
sum=0;
data_t *yt = _y + a*_oh*_ow;
for(i=0; i<_oh*_ow; i++) {
sum+=yt[i];
}
print_flag++;
printf("ysum=%d\n", sum);
printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y);
}
// #pragma omp barrier
#endif
#ifdef CCN_CACHE
_y += (layer->height-_fs+1)*(layer->width-_fs+1);
_W += layer->n_in_feat*_fs*_fs;
#endif /* CCN_CACHE */
} /* for(int a=a_start; a<_nof; a++) */
#ifdef TILE_CHECKSUM
// #pragma omp barrier
// #pragma omp master
{
int _oph, _opw;
int _ps = layer->pool_size;
data_t *_y2 = layer->loc_y_ex;
if(_ps == 2) {
_oph = _oh >> 1;
_opw = _ow >> 1;
}
else if(_ps == 4) {
_oph = _oh >> 2;
_opw = _ow >> 2;
}
else {
_oph = _oh / _ps;
_opw = _ow / _ps;
}
int i, sum=0;
printf("[ConvPoolLayer %s] Tile checksum %d,%d,%d,%d: ", layer->name, aa,bb,ii,jj);
sum=0;
for(i=0; i<_nif*_h*_w; i++){
sum+=_x[i];
}
printf("xsum=%d, ", sum);
sum=0;
for(i=0; i<_nof*_nif*_fs*_fs; i++) {
sum+=_W[i];
}
printf("wsum=%d, ", sum);
sum=0;
for(i=0; i<_nof*_oh*_ow; i++) {
sum+=_y[i];
}
// print_flag++;
printf("ysum=%d\n", sum);
sum=0;
for(i=0; i<_nof*_oph*_opw; i++) {
sum+=_y2[i];
}
// print_flag++;
printf("y2sum=%d\n", sum);
printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y);
}
// #pragma omp barrier
#endif
}
#ifdef EXECUTE_PROFILE
perf_stop();
int t0 = perf_get_cycles();
printf("[ConvPoolLayer %s] Execute profiling: %d\n", layer->name, t0);
#endif /* EXECUTE_PROFILE */
}
static void ConvPoolLayer_pipe_wb(
ConvPoolLayer *layer,
int aa,
int bb,
int ii,
int jj
) {
#ifdef CCN_CACHE
return;
#endif
// if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1)
if(aa==-1)
return;
#ifdef WRITEBACK_PROFILE
perf_enable_all();
perf_reset();
perf_start();
#endif /* WRITEBACK_PROFILE */
// #pragma omp single
{
_conv_tiling_init();
data_t *l2_y;
// if last bb tile, then l2_y points to the max-pooled output
// if(bb != layer->ntile_nif-1) {
// l2_y = ccn_get_tile_3d(
// layer->y,
// aa, ii, jj,
// layer->tiling_max_nof, layer->tiling_max_height-_fs+1, layer->tiling_max_width-_fs+1,
// layer->height-_fs+1, layer->width-_fs+1,
// 0, 0, 0
// );
// }
// else {
int _tph, _tpw;
int _ph, _pw;
int _oph, _opw;
if(bb == layer->ntile_nif-1) {
int _ps = layer->pool_size;
if(_ps == 2) {
_tph = (layer->tiling_max_height-_fs+1) >> 1;
_tpw = (layer->tiling_max_width-_fs +1) >> 1;
_ph = (layer->height-_fs+1) >> 1;
_pw = (layer->width-_fs +1) >> 1;
_oph = _oh >> 1;
_opw = _ow >> 1;
}
else if(_ps == 4) {
_tph = (layer->tiling_max_height-_fs+1) >> 2;
_tpw = (layer->tiling_max_width-_fs +1) >> 2;
_ph = (layer->height-_fs+1) >> 2;
_pw = (layer->width-_fs +1) >> 2;
_oph = _oh >> 2;
_opw = _ow >> 2;
}
else {
_tph = (layer->tiling_max_height-_fs+1) / _ps;
_tpw = (layer->tiling_max_width-_fs +1) / _ps;
_ph = (layer->height-_fs+1) / _ps;
_pw = (layer->width-_fs +1) / _ps;
_oph = _oh / _ps;
_opw = _ow / _ps;
}
l2_y = ccn_get_tile_3d(
layer->y,
aa, ii, jj,
layer->tiling_max_nof, _tph, _tpw,
_ph, _pw,
0, 0, 0
);
#ifdef WRITEBACK_CHECKSUM
int32_t sum = 0;
for(int i=0; i<_nof*_oh*_ow; i++) {
sum += layer->loc_y_tmp[i];
}
printf("[ConvLayer %s] Writeback checksum %d,%d,%d,%d: %d\n", layer->name, aa, bb, ii, jj, sum);
sum = 0;
for(int i=0; i<_nof*_oph*_opw; i++) {
sum += layer->loc_y_wb[i];
}
// printf("[ConvPoolLayer %s] Writeback checksum %d,%d,%d,%d: %d\n", layer->name, aa, bb, ii, jj, sum);
#endif /* WRITEBACK_CHECKSUM */
#ifdef WRITEBACK_DEBUG
printf("[ConvPoolLayer %s] Writeback debug CONV %d,%d,%d,%d:\n", layer->name, aa, bb, ii, jj);
for(int i=0; i<_nof; i++) {
for(int j=0; j<_oh; j++) {
for(int k=0; k<_ow; k++) {
printf(" (%d,%d,%d): %04x\n", i,j,k, layer->loc_y_tmp[i*_oh*_ow+j*_ow+k] & 0xffff);
}
}
}
printf("[ConvPoolLayer %s] Writeback debug POOL %d,%d,%d,%d:\n", layer->name, aa, bb, ii, jj);
for(int i=0; i<_nof; i++) {
for(int j=0; j<_oph; j++) {
for(int k=0; k<_opw; k++) {
printf(" (%d,%d,%d): %04x\n", i,j,k, layer->loc_y_wb[i*_oph*_opw+j*_opw+k] & 0xffff);
}
}
}
#endif /* WRITEBACK_DEBUG */
#ifdef CCN_TILING_3D
/* with no additional assumptions, the tiling grid is three-dimensional */
// Y tile copy-out
ccn_memcpy_async_3d(
l2_y, // pointers
layer->loc_y_wb,
_nof, // sizes
_oph,
_opw*sizeof(data_t),
_ph, // remote strides
_pw*sizeof(data_t),
_oph, // local strides
_opw*sizeof(data_t)
);
#endif /* CCN_TILING_3D */
#ifdef CCN_TILING_2D
/* Assuming that tiles are internally contiguous in the j feature map dimension,
the tiling grid is two-dimensional.
Moreover, _w=layer->width */
// Y tile copy-in
ccn_memcpy_async_2d(
l2_y, // pointers
layer->loc_y_wb,
_nof, // sizes
_oph*_opw*sizeof(data_t),
_ph*_pw*sizeof(data_t), // local strides
_oph*_opw*sizeof(data_t) // remote strides
);
#endif /* CCN_TILING_2D */
#ifdef CCN_TILING_1D
/* Assuming that tiles are internally contiguous in the i,j feature map dimensions,
the tiling grid is one-dimensional.
Moreover, _h=layer->height,_w=layer->width */
// Y tile copy-in
ccn_memcpy_async(
l2_y, // pointers
layer->loc_y_wb,
_nof*_oph*_opw*sizeof(data_t)
);
#endif /* CCN_TILING_1D */
}
#ifdef FAKEDMA
// ccn_memcpy_wait();
#endif /* FAKEDMA */
}
#ifdef WRITEBACK_DEBUG
#pragma omp barrier
#endif
#ifdef WRITEBACK_PROFILE
perf_stop();
int t0 = perf_get_cycles();
printf("[ConvPoolLayer %s] Writeback profiling: %d\n", layer->name, t0);
#endif /* WRITEBACK_PROFILE */
}
/**
* Executes the given ConvPoolLayer, i.e. computes its outputs given the inputs
* defined in the data structure.
* The ConvPoolLayer computes the output of a convolutional network layer with
* 3d inputs and outputs (an array of 2d feature maps).
*
* @param *layer
* a pointer to the ConvPoolLayer data structure to execute.
*/
void ConvPoolLayer_exec(ConvPoolLayer *layer) {
  // ConvPoolLayer_exec is organized as a pipeline with the following stages:
  //   fetch      (fe) : DMA-in of a tile
  //   execute    (ex) : execution of the layer on a tile
  //   write-back (wb) : DMA-out of a tile
  // all tile indices have a fetch, execute and write-back version
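  // Pipeline sketch (illustrative): with three tiles T0..T2 and the three stages
  // running concurrently on threads THREAD_FE/THREAD_EX/THREAD_WB, the outer
  // iterations overlap as
  //   iter 0: fe(T0)  ex(-)   wb(-)
  //   iter 1: fe(T1)  ex(T0)  wb(-)
  //   iter 2: fe(T2)  ex(T1)  wb(T0)
  //   iter 3: fe(-)   ex(T2)  wb(T1)   <- the NB_PIPE_STAGE-1 drain iterations
  //   iter 4: fe(-)   ex(-)   wb(T2)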
int aa_pipe,bb_pipe,ii_pipe,jj_pipe;
int aa_fe = -1, bb_fe = -1, ii_fe = -1, jj_fe = -1;
int aa_ex = -1, bb_ex = -1, ii_ex = -1, jj_ex = -1;
int aa_wb = -1, bb_wb = -1, ii_wb = -1, jj_wb = -1;
#ifdef CCN_DOUBLEBUF
// initialize double buffering in a known state
int doublebuf_state_x_fe = 0;
int doublebuf_state_y_fe = 0;
int doublebuf_state_y_wb = 0;
#endif /* CCN_DOUBLEBUF */
#ifndef CCN_CACHE
// initialize state of fe local buffer pointers
layer->loc_x_fe = layer->loc_x0;
layer->loc_w_fe = layer->loc_w0;
layer->loc_y_fe = layer->loc_y0;
  // reset the local weight buffers (they only exist when not using the cache)
  memset(layer->loc_w0, 0, sizeof(data_t)*layer->tiling_max_nof*layer->tiling_max_nif*MULTIPLE4(layer->filter_size*layer->filter_size));
  memset(layer->loc_w1, 0, sizeof(data_t)*layer->tiling_max_nof*layer->tiling_max_nif*MULTIPLE4(layer->filter_size*layer->filter_size));
  #endif /* ~CCN_CACHE */
#ifdef CCN_TILING
for(aa_pipe=0; aa_pipe<layer->ntile_nof+NB_PIPE_STAGE-1; aa_pipe++) {
for(ii_pipe=0; ii_pipe<layer->ntile_h; ii_pipe++) {
for(jj_pipe=0; jj_pipe<layer->ntile_w; jj_pipe++) {
for(bb_pipe=0; bb_pipe<layer->ntile_nif; bb_pipe++) {
          // update state of fe indices: fetch only while real tiles remain;
          // during the drain iterations (aa_pipe >= ntile_nof) fe stays idle
          if(aa_pipe < layer->ntile_nof) {
jj_fe = jj_pipe;
ii_fe = ii_pipe;
bb_fe = bb_pipe;
aa_fe = aa_pipe;
}
else {
jj_fe = -1;
ii_fe = -1;
bb_fe = -1;
aa_fe = -1;
}
#ifndef CCN_CACHE
#ifdef CCN_DOUBLEBUF
// update state of fe local buffer pointers
if (doublebuf_state_x_fe == 0) {
layer->loc_x_fe = layer->loc_x0;
}
else {
layer->loc_x_fe = layer->loc_x1;
}
if (doublebuf_state_x_fe == 0) {
layer->loc_w_fe = layer->loc_w0;
}
else {
layer->loc_w_fe = layer->loc_w1;
}
if (doublebuf_state_y_fe == 0) {
layer->loc_y_fe = layer->loc_y0;
}
else if (doublebuf_state_y_fe == 1) {
layer->loc_y_fe = layer->loc_y1;
}
else {
layer->loc_y_fe = layer->loc_y2;
}
#endif /* CCN_DOUBLEBUF */
#endif /* ~CCN_CACHE */
#ifdef PIPE_DEBUG
printf("[ConvPoolLayer %s pipe] aa=%d bb=%d ii=%d jj=%d\n", layer->name, aa_pipe, bb_pipe, ii_pipe, jj_pipe);
printf(" fe: aa=%d bb=%d ii=%d jj=%d\n", aa_fe, bb_fe, ii_fe, jj_fe);
printf(" ex: aa=%d bb=%d ii=%d jj=%d\n", aa_ex, bb_ex, ii_ex, jj_ex);
printf(" wb: aa=%d bb=%d ii=%d jj=%d\n", aa_wb, bb_wb, ii_wb, jj_wb);
printf(" doublebuf states: %d %d %d\n", doublebuf_state_x_fe, doublebuf_state_y_fe, doublebuf_state_y_wb);
printf("\n");
#endif PIPE_DEBUG
#ifdef PIPE_PROFILE
reset_timer();
start_timer();
#endif /* PIPE_PROFILE */
#ifndef DISABLE_OPENMP
#pragma omp parallel num_threads(3)
#endif
{
// fetch stage
#ifndef DISABLE_OPENMP
if(omp_get_thread_num() == THREAD_FE)
#endif
ConvPoolLayer_pipe_fe(layer, aa_fe, bb_fe, ii_fe, jj_fe);
// execute stage
#ifndef DISABLE_OPENMP
if(omp_get_thread_num() == THREAD_EX)
#endif
ConvPoolLayer_pipe_ex(layer, aa_ex, bb_ex, ii_ex, jj_ex);
// write-back stage
#ifndef DISABLE_OPENMP
if(omp_get_thread_num() == THREAD_WB)
#endif
ConvPoolLayer_pipe_wb(layer, aa_wb, bb_wb, ii_wb, jj_wb);
}
#ifdef PIPE_PROFILE
stop_timer();
int t0 = get_time();
reset_timer();
printf("[ConvPoolLayer %s] Pipe profiling: %d\n", layer->name, t0);
#endif /* PIPE_PROFILE */
          // update state of ex,wb indices
jj_wb = jj_ex;
jj_ex = jj_fe;
ii_wb = ii_ex;
ii_ex = ii_fe;
bb_wb = bb_ex;
bb_ex = bb_fe;
aa_wb = aa_ex;
aa_ex = aa_fe;
// update state of ex,wb local buffers
layer->loc_x_ex = layer->loc_x_fe;
layer->loc_w_ex = layer->loc_w_fe;
layer->loc_y_wb = layer->loc_y_ex;
layer->loc_y_ex = layer->loc_y_fe;
#ifndef CCN_CACHE
#ifdef CCN_DOUBLEBUF
// switch double buffering state
if (doublebuf_state_x_fe == 0) {
doublebuf_state_x_fe = 1;
}
else {
doublebuf_state_x_fe = 0;
}
if (doublebuf_state_y_fe == 0) {
doublebuf_state_y_fe = 1;
}
else if (doublebuf_state_y_fe == 1) {
doublebuf_state_y_fe = 2;
}
else {
doublebuf_state_y_fe = 0;
}
if (doublebuf_state_y_wb == 0) {
doublebuf_state_y_wb = 1;
}
else if (doublebuf_state_y_wb == 1) {
doublebuf_state_y_wb = 2;
}
else {
doublebuf_state_y_wb = 0;
}
#endif /* CCN_DOUBLEBUF */
#endif /* ~CCN_CACHE */
}
}
}
}
#else /* ~CCN_TILING */
// fetch stage
ConvPoolLayer_pipe_fe(layer, 0, 0, 0, 0);
// execute stage
ConvPoolLayer_pipe_ex(layer, 0, 0, 0, 0);
// write-back stage
ConvPoolLayer_pipe_wb(layer, 0, 0, 0, 0);
#endif /* CCN_TILING */
}
|
GB_binop__isle_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_fp64
// A.*B function (eWiseMult): GB_AemultB__isle_fp64
// A*D function (colscale): GB_AxD__isle_fp64
// D*A function (rowscale): GB_DxB__isle_fp64
// C+=B function (dense accum): GB_Cdense_accumB__isle_fp64
// C+=b function (dense accum): GB_Cdense_accumb__isle_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_fp64
// C=scalar+B GB_bind1st__isle_fp64
// C=scalar+B' GB_bind1st_tran__isle_fp64
// C=A+scalar GB_bind2nd__isle_fp64
// C=A'+scalar GB_bind2nd_tran__isle_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isle_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isle_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isle_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isle_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isle_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__isle_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isle_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isle_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isle_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB_bind1st_tran__isle_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB_bind2nd_tran__isle_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_amgdd_fac_cycle.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
HYPRE_Int
hypre_BoomerAMGDD_FAC( void *amgdd_vdata, HYPRE_Int first_iteration )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
HYPRE_Int cycle_type = hypre_ParAMGDDDataFACCycleType(amgdd_data);
HYPRE_Int start_level = hypre_ParAMGDDDataStartLevel(amgdd_data);
if (cycle_type == 1 || cycle_type == 2)
{
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, cycle_type, first_iteration);
}
else if (cycle_type == 3)
{
hypre_BoomerAMGDD_FAC_FCycle(amgdd_vdata, first_iteration);
}
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: unknown AMG-DD FAC cycle type. Defaulting to 1 (V-cycle).\n");
hypre_ParAMGDDDataFACCycleType(amgdd_data) = 1;
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, 1, first_iteration);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_Cycle( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_type,
HYPRE_Int first_iteration )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data);
hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data);
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data);
HYPRE_Int i;
// Relax on the real nodes
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 1);
// Restrict the residual at all fine points (real and ghost) and set residual at coarse points not under the fine grid
if (num_levels > 1)
{
hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], first_iteration);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0);
// Either solve on the coarse level or recurse
if (level+1 == num_levels-1)
{
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3);
}
else for (i = 0; i < cycle_type; i++)
{
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level+1, cycle_type, first_iteration);
first_iteration = 0;
}
// Interpolate up and relax
hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]);
}
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 2);
return hypre_error_flag;
}
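/* Note: the loop above recurses on level+1 cycle_type times, so cycle_type == 1
   gives a V-cycle and cycle_type == 2 a W-cycle; cycle_type == 3 is handled
   separately as an F-cycle by hypre_BoomerAMGDD_FAC_FCycle. */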
HYPRE_Int
hypre_BoomerAMGDD_FAC_FCycle( void *amgdd_vdata,
HYPRE_Int first_iteration )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data);
hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data);
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data);
HYPRE_Int level;
// ... work down to coarsest ...
if (!first_iteration)
{
for (level = hypre_ParAMGDDDataStartLevel(amgdd_data); level < num_levels - 1; level++)
{
hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], 0);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0);
}
}
// ... solve on coarsest level ...
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3);
// ... and work back up to the finest
for (level = num_levels - 2; level > -1; level--)
{
// Interpolate up and relax
hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]);
// V-cycle
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level, 1, 0);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_Interpolate( hypre_AMGDDCompGrid *compGrid_f,
hypre_AMGDDCompGrid *compGrid_c )
{
hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridP(compGrid_f),
hypre_AMGDDCompGridU(compGrid_c),
1.0, hypre_AMGDDCompGridU(compGrid_f));
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_Restrict( hypre_AMGDDCompGrid *compGrid_f,
hypre_AMGDDCompGrid *compGrid_c,
HYPRE_Int first_iteration )
{
// Recalculate residual on coarse grid
if (!first_iteration)
{
hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridA(compGrid_c),
hypre_AMGDDCompGridU(compGrid_c),
1.0, hypre_AMGDDCompGridF(compGrid_c));
}
// Get update: s_l <- A_lt_l + s_l
hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridA(compGrid_f),
hypre_AMGDDCompGridT(compGrid_f),
1.0, hypre_AMGDDCompGridS(compGrid_f));
// If we need to preserve the updates on the next level
if (hypre_AMGDDCompGridS(compGrid_c))
{
hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridR(compGrid_f),
hypre_AMGDDCompGridS(compGrid_f),
0.0, hypre_AMGDDCompGridS(compGrid_c));
// Subtract restricted update from recalculated residual: f_{l+1} <- f_{l+1} - s_{l+1}
hypre_AMGDDCompGridVectorAxpy(-1.0, hypre_AMGDDCompGridS(compGrid_c), hypre_AMGDDCompGridF(compGrid_c));
}
else
{
      // Restrict and subtract update from recalculated residual: f_{l+1} <- f_{l+1} - P_l^T s_l
hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridR(compGrid_f),
hypre_AMGDDCompGridS(compGrid_f),
1.0, hypre_AMGDDCompGridF(compGrid_c));
}
// Zero out initial guess on coarse grid
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridU(compGrid_c), 0.0);
return hypre_error_flag;
}
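/* Summary of the restriction above, in the notation of the inline comments:
   s_l <- s_l + A_l t_l, then either s_{l+1} = R_l s_l with
   f_{l+1} <- f_{l+1} - s_{l+1} (when updates must be preserved on the coarse
   level), or f_{l+1} <- f_{l+1} - R_l s_l in a single matvec otherwise. */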
HYPRE_Int
hypre_BoomerAMGDD_FAC_Relax( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_Int numRelax = hypre_ParAMGDDDataFACNumRelax(amgdd_data);
HYPRE_Int i;
if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid))
{
hypre_AMGDDCompGridVectorCopy(hypre_AMGDDCompGridU(compGrid),
hypre_AMGDDCompGridTemp(compGrid));
hypre_AMGDDCompGridVectorScale(-1.0, hypre_AMGDDCompGridTemp(compGrid));
}
for (i = 0; i < numRelax; i++)
{
(*hypre_ParAMGDDDataUserFACRelaxation(amgdd_data))(amgdd_vdata, level, cycle_param);
}
if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid))
{
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridU(compGrid),
hypre_AMGDDCompGridTemp(compGrid));
if (hypre_AMGDDCompGridT(compGrid))
{
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridTemp(compGrid),
hypre_AMGDDCompGridT(compGrid));
}
if (hypre_AMGDDCompGridQ(compGrid))
{
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridTemp(compGrid),
hypre_AMGDDCompGridQ(compGrid));
}
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_Jacobi( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location);
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_BoomerAMGDD_FAC_JacobiDevice(amgdd_vdata, level);
}
else
#endif
{
hypre_BoomerAMGDD_FAC_JacobiHost(amgdd_vdata, level);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_JacobiHost( void *amgdd_vdata,
HYPRE_Int level )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data);
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
hypre_CSRMatrix *diag;
HYPRE_Int total_real_nodes;
HYPRE_Int i, j;
// Calculate l1_norms if necessary (right now, I'm just using this vector for the diagonal of A and doing straight ahead Jacobi)
if (!hypre_AMGDDCompGridL1Norms(compGrid))
{
total_real_nodes = hypre_AMGDDCompGridNumOwnedNodes(compGrid) +
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid);
hypre_AMGDDCompGridL1Norms(compGrid) = hypre_CTAlloc(HYPRE_Real,
total_real_nodes,
memory_location);
diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++)
{
// hypre_AMGDDCompGridL1Norms(compGrid)[i] += fabs(hypre_CSRMatrixData(diag)[j]);
if (hypre_CSRMatrixJ(diag)[j] == i)
{
hypre_AMGDDCompGridL1Norms(compGrid)[i] = hypre_CSRMatrixData(diag)[j];
}
}
}
diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++)
{
// hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] += fabs(hypre_CSRMatrixData(diag)[j]);
if (hypre_CSRMatrixJ(diag)[j] == i)
{
hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] = hypre_CSRMatrixData(diag)[j];
}
}
}
}
// Allocate temporary vector if necessary
if (!hypre_AMGDDCompGridTemp2(compGrid))
{
hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate();
hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid),
hypre_AMGDDCompGridNumOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid));
}
hypre_AMGDDCompGridVectorCopy(f, hypre_AMGDDCompGridTemp2(compGrid));
hypre_AMGDDCompGridMatvec(-relax_weight, A, u, relax_weight, hypre_AMGDDCompGridTemp2(compGrid));
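   // Temp2 now holds relax_weight * (f - A*u); dividing by the stored diagonal
   // below yields the damped Jacobi update u <- u + w * D^{-1} * (f - A*u)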
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u))[i] +=
hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] /
hypre_AMGDDCompGridL1Norms(compGrid)[i];
}
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u))[i] +=
hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] /
hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)];
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_GaussSeidel( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A);
hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A);
HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u));
HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u));
HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f));
HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f));
HYPRE_Int i, j; // loop variables
HYPRE_Complex diagonal; // placeholder for the diagonal of A
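   // Each sweep below applies u_i <- (f_i - sum_{j != i} a_ij * u_j) / a_ii,
   // reading the freshest available values of u (Gauss-Seidel)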
// Do Gauss-Seidel relaxation on the owned nodes
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
// Initialize u as RHS
u_owned_data[i] = f_owned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(owned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(owned_diag)[j];
}
else
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n");
}
u_owned_data[i] /= diagonal;
}
// Do Gauss-Seidel relaxation on the nonowned nodes
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
// Initialize u as RHS
u_nonowned_data[i] = f_nonowned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(nonowned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(nonowned_diag)[j];
}
else
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n");
}
u_nonowned_data[i] /= diagonal;
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_OrderedGaussSeidel( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
HYPRE_Int unordered_i, i, j; // loop variables
HYPRE_Complex diagonal; // placeholder for the diagonal of A
if (!hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid))
{
hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int,
hypre_AMGDDCompGridNumOwnedNodes(compGrid),
hypre_AMGDDCompGridMemoryLocation(compGrid));
hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid),
hypre_AMGDDCompGridNumOwnedNodes(compGrid));
}
if (!hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid))
{
hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int,
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
hypre_AMGDDCompGridMemoryLocation(compGrid));
hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid),
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid));
}
// Get all the info
HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u));
HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u));
HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f));
HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f));
hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A);
hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A);
// Do Gauss-Seidel relaxation on the nonowned real nodes
for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); unordered_i++)
{
i = hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid)[unordered_i];
// Initialize u as RHS
u_nonowned_data[i] = f_nonowned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(nonowned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(nonowned_diag)[j];
}
else
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
}
u_nonowned_data[i] /= diagonal;
}
// Do Gauss-Seidel relaxation on the owned nodes
for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); unordered_i++)
{
i = hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid)[unordered_i];
// Initialize u as RHS
u_owned_data[i] = f_owned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(owned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(owned_diag)[j];
}
else
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
}
u_owned_data[i] /= diagonal;
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1Jacobi( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location);
if (exec == HYPRE_EXEC_DEVICE)
{
if (cycle_param == 1)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
}
else if (cycle_param == 2)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
}
else
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
}
}
else
#endif
{
if (cycle_param == 1)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
}
else if (cycle_param == 2)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
}
else
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
}
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1JacobiHost( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int relax_set )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data);
hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid));
hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(hypre_AMGDDCompGridA(compGrid));
hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid));
hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(hypre_AMGDDCompGridA(compGrid));
HYPRE_Complex *owned_u = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridU(compGrid)));
HYPRE_Complex *nonowned_u = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridU(compGrid)));
HYPRE_Complex *owned_f = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridF(compGrid)));
HYPRE_Complex *nonowned_f = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridF(compGrid)));
HYPRE_Real *l1_norms = hypre_AMGDDCompGridL1Norms(compGrid);
HYPRE_Int *cf_marker = hypre_AMGDDCompGridCFMarkerArray(compGrid);
HYPRE_Complex *owned_tmp;
HYPRE_Complex *nonowned_tmp;
HYPRE_Int i, j;
HYPRE_Real res;
/*-----------------------------------------------------------------
* Create and initialize Temp2 vector if not done before.
*-----------------------------------------------------------------*/
if (!hypre_AMGDDCompGridTemp2(compGrid))
{
hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate();
hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid),
hypre_AMGDDCompGridNumOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid));
}
owned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)));
nonowned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)));
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
owned_tmp[i] = owned_u[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedNodes(compGrid); i++)
{
nonowned_tmp[i] = nonowned_u[i];
}
   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_set:
    *   u_i <- u_i + relax_weight * (f - A*u)_i / l1_norm_i
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
if (cf_marker[i] == relax_set)
{
res = owned_f[i];
for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
{
res -= hypre_CSRMatrixData(owned_diag)[j] * owned_tmp[ hypre_CSRMatrixJ(owned_diag)[j] ];
}
for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
{
res -= hypre_CSRMatrixData(owned_offd)[j] * nonowned_tmp[ hypre_CSRMatrixJ(owned_offd)[j] ];
}
owned_u[i] += (relax_weight * res)/l1_norms[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
if (cf_marker[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] == relax_set)
{
res = nonowned_f[i];
for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
{
res -= hypre_CSRMatrixData(nonowned_diag)[j] * nonowned_tmp[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
}
for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
{
res -= hypre_CSRMatrixData(nonowned_offd)[j] * owned_tmp[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
}
nonowned_u[i] += (relax_weight * res)/l1_norms[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)];
}
}
return hypre_error_flag;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
struct _FxInfo
{
const Image
*images;
char
*expression;
FILE
*file;
SplayTreeInfo
*colors,
*symbols;
CacheView
**view;
RandomInfo
*random_info;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(const Image *images,const char *expression,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
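/*
  A minimal usage sketch (assuming `image` and `exception` were acquired
  elsewhere, e.g. via ReadImage() and AcquireExceptionInfo()); the
  returned FxInfo must be released with DestroyFxInfo():

    FxInfo *fx_info = AcquireFxInfo(image,"u*0.5",exception);
    if (fx_info != (FxInfo *) NULL)
      fx_info = DestroyFxInfo(fx_info);
*/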
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
char
fx_op[2];
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Convert compound to simple operators: each multi-character operator
    is replaced with a single sentinel byte (FxOperator, 0xd9 and up) so
    the parser below only ever inspects one character at a time.
  */
fx_op[1]='\0';
*fx_op=(char) BitwiseAndAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"&=",fx_op);
*fx_op=(char) BitwiseOrAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"|=",fx_op);
*fx_op=(char) LeftShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"<<=",fx_op);
*fx_op=(char) RightShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,">>=",fx_op);
*fx_op=(char) PowerAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"^=",fx_op);
*fx_op=(char) ModuloAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"%=",fx_op);
*fx_op=(char) PlusAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"+=",fx_op);
*fx_op=(char) SubtractAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"-=",fx_op);
*fx_op=(char) MultiplyAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"*=",fx_op);
*fx_op=(char) DivideAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"/=",fx_op);
*fx_op=(char) IncrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"++",fx_op);
*fx_op=(char) DecrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"--",fx_op);
*fx_op=(char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",fx_op);
*fx_op=(char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",fx_op);
*fx_op=(char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",fx_op);
*fx_op=(char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",fx_op);
*fx_op=(char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",fx_op);
*fx_op=(char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",fx_op);
*fx_op=(char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",fx_op);
*fx_op=(char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",fx_op);
*fx_op=(char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",fx_op);
  /*
    Force right-to-left associativity for unary negation: rewrite "-" as
    "-1.0*", then undo the rewrite where '-' is part of an exponent or of
    scientific notation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
register ssize_t
i;
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateChannelExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,ExceptionInfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
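/*
  A minimal per-pixel sketch (hypothetical coordinates x,y; assumes the
  FxInfo was acquired over the target image list):

    double alpha = 0.0;
    double value = FxEvaluateChannelExpression(fx_info,RedPixelChannel,
      x,y,&alpha,exception);

  `value` is the evaluated result for the red channel at (x,y).
*/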
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
const char *symbol)
{
return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}
static inline MagickBooleanType SetFxSymbolValue(
FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
double const value)
{
double
*object;
object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
if (object != (double *) NULL)
{
*object=value;
return(MagickTrue);
}
object=(double *) AcquireQuantumMemory(1,sizeof(*object));
if (object == (double *) NULL)
{
(void) ThrowMagickException(fx_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
fx_info->images->filename);
return(MagickFalse);
}
*object=value;
return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}
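/*
  FxChannelStatistics() returns a whole-image statistic (depth, kurtosis,
  maxima, mean, minima, skewness, or standard_deviation) for the given
  channel, normalized by QuantumScale.  An optional ".channel" suffix in
  the symbol selects a different channel; results are cached in the
  symbol splay-tree under a per-image key so each statistic is computed
  only once.
*/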
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
ChannelType
channel_mask;
char
key[MagickPathExtent];
const double
*value;
double
statistic;
register const char
*p;
channel_mask=UndefinedChannel;
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
if (*p == '.')
{
ssize_t
option;
option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
if (option >= 0)
{
channel=(PixelChannel) option;
channel_mask=SetPixelChannelMask(image,(ChannelType)
(1UL << channel));
}
}
(void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=GetFxSymbolValue(fx_info,key);
if (value != (const double *) NULL)
{
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
return(QuantumScale*(*value));
}
statistic=0.0;
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageDepth(image,exception);
statistic=(double) depth;
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=kurtosis;
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=maxima;
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=mean;
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=minima;
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=skewness;
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=standard_deviation;
}
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
return(0.0);
return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static inline MagickBooleanType IsFxFunction(const char *expression,
const char *name,const size_t length)
{
int
c;
register size_t
i;
for (i=0; i <= length; i++)
if (expression[i] == '\0')
return(MagickFalse);
c=expression[length];
if ((LocaleNCompare(expression,name,length) == 0) &&
((isspace(c) == 0) || (c == '(')))
return(MagickTrue);
return(MagickFalse);
}
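/* Greatest common divisor by Euclid's recursion. */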
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
if (beta != 0)
return(FxGCD(beta,alpha % beta));
return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
ExceptionInfo *exception)
{
const char
*subexpression;
register ssize_t
level;
level=0;
subexpression=expression;
while ((*subexpression != '\0') &&
((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
{
if (strchr("(",(int) *subexpression) != (char *) NULL)
level++;
else
if (strchr(")",(int) *subexpression) != (char *) NULL)
level--;
subexpression++;
}
if (*subexpression == '\0')
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnbalancedParenthesis","`%s'",expression);
return(subexpression);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MagickPathExtent];
const char
*p;
const double
*value;
double
alpha,
beta;
Image
*image;
MagickBooleanType
status;
PixelInfo
pixel;
PointInfo
point;
register ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetPixelInfo(image,&pixel);
status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
(LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
(LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MagickPathExtent];
size_t
length;
(void) CopyMagickString(name,p,MagickPathExtent);
length=strlen(name);
for (q=name+length-1; q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
q=name;
if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
(GetFxSymbolValue(fx_info,name) == (const double *) NULL))
{
PixelInfo
*color;
color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
if (color != (PixelInfo *) NULL)
{
pixel=(*color);
p+=length;
}
else
{
MagickBooleanType
status;
status=QueryColorCompliance(name,AllCompliance,&pixel,
fx_info->exception);
if (status != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,
ConstantString(name),ClonePixelInfo(&pixel));
p+=length;
}
}
}
}
(void) CopyMagickString(symbol,p,MagickPathExtent);
StripString(symbol);
if (*symbol == '\0')
{
switch (channel)
{
case RedPixelChannel: return(QuantumScale*pixel.red);
case GreenPixelChannel: return(QuantumScale*pixel.green);
case BluePixelChannel: return(QuantumScale*pixel.blue);
case BlackPixelChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
case AlphaPixelChannel:
{
if (pixel.alpha_trait == UndefinedPixelTrait)
return(1.0);
alpha=(double) (QuantumScale*pixel.alpha);
return(alpha);
}
case CompositePixelChannel:
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
case IndexPixelChannel:
return(0.0);
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((QuantumScale*pixel.alpha));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(symbol,"channel",7) != MagickFalse)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
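          /*
            channel(...) supplies per-channel constants as geometry
            arguments rho,sigma,xi,psi,chi; the mapping below follows the
            channel order of the image colorspace (CMYKA vs. RGBA).
          */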
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowPixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case AlphaPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BluePixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
case AlphaPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"intensity") == 0)
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
        double
          luminance;
        luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
        return(QuantumScale*luminance);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.alpha);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->resolution.x)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->resolution.y)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
return((double) GetImageDepth(image,fx_info->exception));
break;
}
default:
break;
}
value=GetFxSymbolValue(fx_info,symbol);
if (value != (const double *) NULL)
return(*value);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UndefinedVariable","`%s'",symbol);
(void) SetFxSymbolValue(fx_info,symbol,0.0);
return(0.0);
}
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
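  /*
    Scan the expression at parenthesis level 0 and return a pointer to
    the operator of lowest precedence (the one evaluated last), or NULL
    when no top-level operator is found.  Unary, ternary, and assignment
    operators bind right-to-left; all others left-to-right.
  */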
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
FxPrecedence
precedence,
target;
register const char
*subexpression;
register int
c;
size_t
level;
c=(-1);
level=0;
subexpression=(const char *) NULL;
target=NullPrecedence;
while ((c != '\0') && (*expression != '\0'))
{
precedence=UndefinedPrecedence;
if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
switch (*expression)
{
case 'A':
case 'a':
{
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
expression+=5;
break;
}
break;
}
case 'E':
case 'e':
{
if ((isdigit(c) != 0) &&
((LocaleNCompare(expression,"E+",2) == 0) ||
(LocaleNCompare(expression,"E-",2) == 0)))
{
expression+=2; /* scientific notation */
break;
}
}
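      /* fall through */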
case 'J':
case 'j':
{
if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
(IsFxFunction(expression,"j1",2) != MagickFalse))
{
expression+=2;
break;
}
break;
}
case '#':
{
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
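        /*
          Implied multiplication, e.g. "2u" or ")(": a digit or ')'
          followed by a letter, '(', or digit (the x/y pixel-offset
          suffixes excepted).
        */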
if (((c != 0) && ((isdigit(c) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((unsigned char) *expression)) != 0) ||
(strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
((isdigit(c) == 0) &&
(isdigit((int) ((unsigned char) *expression)) != 0))) &&
(strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha(c) != 0))
precedence=AdditionPrecedence;
break;
}
case BitwiseAndAssignmentOperator:
case BitwiseOrAssignmentOperator:
case LeftShiftAssignmentOperator:
case RightShiftAssignmentOperator:
case PowerAssignmentOperator:
case ModuloAssignmentOperator:
case PlusAssignmentOperator:
case SubtractAssignmentOperator:
case MultiplyAssignmentOperator:
case DivideAssignmentOperator:
case IncrementAssignmentOperator:
case DecrementAssignmentOperator:
{
precedence=AssignmentPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
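/*
  FxReturn() releases the scratch subexpression buffer before returning.
  FxParseSubscription() splits `subexpression` at the first `sentinal`
  character found outside parentheses: p receives the NUL-terminated
  head, q is left pointing at the sentinel position.
*/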
#define FxParseSubscription(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
FxParseSubscription(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent);
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
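          /*
            Parse if(condition test, true expression, false expression).
          */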
length=CopyMagickString(subexpression,expression+3,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent);
FxParseSubscription(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
return(status);
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
FILE
*file;
MagickBooleanType
status;
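/*
  Detach the trace file while pre-evaluating the expression at pixel
  (0,0), so validating it produces no debug output; the file is
  restored before returning.
*/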
file=fx_info->file;
fx_info->file=(FILE *) NULL;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
fx_info->file=file;
return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
double *alpha,ExceptionInfo *exception)
{
double
beta;
beta=0.0;
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
&beta,exception);
return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
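/*
  Hedged usage sketch (illustrative, not part of this file): only the
  FxImage() signature documented above is taken from this source; the
  surrounding setup names are assumptions.

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *image = ReadImage(image_info, exception);  // image_info prepared elsewhere
    Image *fx_image = FxImage(image, "0.5*u + 0.2", exception);
    if (fx_image != (Image *) NULL)
      fx_image = DestroyImage(fx_image);
*/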
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
register ssize_t
i;
assert(fx_info != (FxInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (fx_info[i] != (FxInfo *) NULL)
fx_info[i]=DestroyFxInfo(fx_info[i]);
fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
ExceptionInfo *exception)
{
char
*fx_expression;
double
alpha;
FxInfo
**fx_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
if (fx_info == (FxInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return((FxInfo **) NULL);
}
(void) memset(fx_info,0,number_threads*sizeof(*fx_info));
if (*expression != '@')
fx_expression=ConstantString(expression);
else
fx_expression=FileToString(expression+1,~0UL,exception);
for (i=0; i < (ssize_t) number_threads; i++)
{
MagickBooleanType
status;
fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
if (fx_info[i] == (FxInfo *) NULL)
break;
status=FxPreprocessExpression(fx_info[i],&alpha,exception);
if (status == MagickFalse)
break;
}
fx_expression=DestroyString(fx_expression);
if (i < (ssize_t) number_threads)
fx_info=DestroyFxThreadSet(fx_info);
return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view,
*image_view;
FxInfo
**magick_restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (expression == (const char *) NULL)
return(CloneImage(image,0,0,MagickTrue,exception));
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
{
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status) \
magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
if ((traits == UndefinedPixelTrait) ||
(fx_traits == UndefinedPixelTrait))
continue;
if ((fx_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(fx_image,channel,p[i],q);
continue;
}
alpha=0.0;
(void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
exception);
q[i]=ClampToQuantum(QuantumRange*alpha);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(fx_image);
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
image_view=DestroyCacheView(image_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
|
GB_unop__identity_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fp64)
// op(A') function: GB (_unop_tran__identity_bool_fp64)
// C type: bool
// A type: double
// cast: bool cij = (aij != 0)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (aij != 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (aij != 0) ; \
Cx [pC] = z ; \
}
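// Worked example of the cast above (assumed values, for illustration):
// Ax = {0.0, 3.5, -2.0} yields Cx = {false, true, true}, since
// cij = (aij != 0).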
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_bool_fp64)
(
bool *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
bool z = (aij != 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
bool z = (aij != 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_bool_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lis_matvec_dia.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
void lis_matvec_dia(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
LIS_INT i,j,is,ie,js,je,jj,ii;
LIS_INT n,np,nnd,k;
LIS_INT my_rank,nprocs;
if( A->is_splited )
{
n = A->n;
np = A->np;
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
#endif
{
#ifdef _OPENMP
nprocs = omp_get_max_threads();
my_rank = omp_get_thread_num();
#else
nprocs = 1;
my_rank = 0;
#endif
LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=is;i<ie;i++)
{
y[i] = A->D->value[i] * x[i];
}
for(j=0;j<A->L->nnd;j++)
{
jj = A->L->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*A->L->nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[i] += A->L->value[k + ii] * x[jj+i];
ii++;
}
}
for(j=0;j<A->U->nnd;j++)
{
jj = A->U->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*A->U->nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[i] += A->U->value[k + ii] * x[jj+i];
ii++;
}
}
}
}
else
{
n = A->n;
np = A->np;
nnd = A->nnd;
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
#endif
{
#ifdef _OPENMP
nprocs = omp_get_max_threads();
my_rank = omp_get_thread_num();
#else
nprocs = 1;
my_rank = 0;
#endif
LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=is; i<ie; i++)
{
y[i] = 0.0;
}
for(j=0;j<nnd;j++)
{
jj = A->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[i] += A->value[k + ii] * x[jj+i];
ii++;
}
}
}
}
}
void lis_matvect_dia(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
LIS_INT i,j,is,ie,js,je,jj,ii;
LIS_INT n,nnd,np,k;
LIS_INT my_rank,nprocs;
#ifdef _OPENMP
LIS_SCALAR t,*w;
#endif
if( A->is_splited )
{
n = A->n;
np = A->np; /* np was read uninitialized in the USE_MPI bounds below */
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
#endif
{
#ifdef _OPENMP
nprocs = omp_get_max_threads();
my_rank = omp_get_thread_num();
#else
nprocs = 1;
my_rank = 0;
#endif
LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=is; i<ie; i++)
{
y[i] = 0.0;
}
for(j=0;j<A->L->nnd;j++)
{
jj = A->L->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*A->L->nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[jj+i] += A->L->value[k + ii] * x[i];
ii++;
}
}
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=is;i<ie;i++)
{
y[i] += A->D->value[i] * x[i];
}
for(j=0;j<A->U->nnd;j++)
{
jj = A->U->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*A->U->nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[jj+i] += A->U->value[k + ii] * x[i];
ii++;
}
}
}
}
else
{
n = A->n;
np = A->np;
nnd = A->nnd;
#ifdef _OPENMP
nprocs = omp_get_max_threads();
w = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_dia::w" );
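/* The transposed product scatters into y[jj+i], so threads would race
on y. Each thread accumulates into its own np-sized slice of w; the
slices are reduced into y after the barrier below. */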
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
{
nprocs = omp_get_max_threads();
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
#pragma omp for
for(j=0;j<nprocs;j++)
{
memset( &w[j*np], 0, np*sizeof(LIS_SCALAR) );
}
for(j=0;j<nnd;j++)
{
jj = A->index[j];
js = _max(is,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
je = _min(ie,n-jj);
#endif
k = is*nnd + j*(ie-is);
ii = js-is;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
w[my_rank*np + jj+i] += A->value[k + ii] * x[i];
ii++;
}
}
#pragma omp barrier
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
/* keep the omp for directive immediately before the loop it controls */
#pragma omp for
for(i=0;i<np;i++)
{
t = 0.0;
for(j=0;j<nprocs;j++)
{
t += w[j*np+i];
}
y[i] = t;
}
}
lis_free(w);
#else
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=0;i<np;i++)
{
y[i] = 0.0;
}
for(j=0;j<nnd;j++)
{
jj = A->index[j];
js = _max(0,-jj);
#ifdef USE_MPI
je = jj<=(np-n)?n:_min(n,np-jj);
#else
je = _min(n,n-jj);
#endif
k = j*n;
ii = js;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
for(i=js;i<je;i++)
{
y[jj+i] += A->value[k + ii] * x[i];
ii++;
}
}
#endif
}
}
|
GrB_Vector_size.c | //------------------------------------------------------------------------------
// GrB_Vector_size: dimension of a sparse vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Vector_size // get the dimension of a vector
(
GrB_Index *n, // dimension is n-by-1
const GrB_Vector v // vector to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GrB_Vector_size (&n, v)") ;
GB_RETURN_IF_NULL (n) ;
GB_RETURN_IF_NULL_OR_FAULTY (v) ;
ASSERT (GB_VECTOR_OK (v)) ;
//--------------------------------------------------------------------------
// get the size
//--------------------------------------------------------------------------
(*n) = v->vlen ;
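// flush so the just-written (*n) is visible to any other thread
// before this call returns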
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
target_data-4.c | /* { dg-do run } */
#include <stdlib.h>
#define EPS 0.000001
const int MAX = 1800;
void check (double *a, double *b, int N)
{
int i;
for (i = 0; i < N; i++)
if (a[i] - b[i] > EPS || b[i] - a[i] > EPS)
abort ();
}
void init (double *a1, double *a2, int N)
{
double s = -1;
int i;
for (i = 0; i < N; i++)
{
a1[i] = s;
a2[i] = i;
s = -s;
}
}
void vec_mult_ref (double *p1, double *v3, double *v4, int N)
{
int i;
for (i = 0; i < N; i++)
p1[i] = v3[i] * v4[i];
}
void foo_ref (double *p0, double *v1, double *v2, int N)
{
init (v1, v2, N);
vec_mult_ref (p0, v1, v2, N);
}
void vec_mult (double *p1, double *v3, double *v4, int N)
{
int i;
#pragma omp target map(to: v3[0:N], v4[:N]) map(from: p1[0:N])
#pragma omp parallel for
for (i = 0; i < N; i++)
p1[i] = v3[i] * v4[i];
}
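/* foo() below wraps the call to vec_mult() in a `target data` region
that maps v1, v2 and p0 once; the inner `target` construct in
vec_mult() then finds those arrays already present on the device and
reuses the existing mappings instead of copying again. */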
void foo (double *p0, double *v1, double *v2, int N)
{
init (v1, v2, N);
#pragma omp target data map(to: v1[0:N], v2[:N]) map(from: p0[0:N])
vec_mult (p0, v1, v2, N);
}
int main ()
{
double *p1 = (double *) malloc (MAX * sizeof (double));
double *p2 = (double *) malloc (MAX * sizeof (double));
double *v1 = (double *) malloc (MAX * sizeof (double));
double *v2 = (double *) malloc (MAX * sizeof (double));
foo_ref (p1, v1, v2, MAX);
foo (p2, v1, v2, MAX);
check (p1, p2, MAX);
free (p1);
free (p2);
free (v1);
free (v2);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
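/*
 * Reference (untiled) form of the update performed by the tiled CLooG
 * loop nest in main(), for interior points:
 *
 *   A[(t+1)%2][i][j][k] = alpha * A[t%2][i][j][k]
 *     + beta * ( A[t%2][i-1][j][k] + A[t%2][i+1][j][k]
 *              + A[t%2][i][j-1][k] + A[t%2][i][j+1][k]
 *              + A[t%2][i][j][k-1] + A[t%2][i][j][k+1] );
 */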
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
else {
/* Nx, Ny and Nz were previously read uninitialized when fewer than
three arguments were given; fail loudly instead. */
fprintf(stderr, "usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
Nt = 10; /* assumed fallback; Nt was uninitialized without a 4th argument */
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t3+Nx,256),floord(Nt+Nx-4,256)),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing was causing performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
parallelfor.c | #include <omp.h>
#define N 10
int main (int argc, char *argv[]) {
double a[N], b[N];
int i, sum;
#pragma omp parallel
#pragma omp single
for (i=0; i < N; i++)
a[i] = 0;
/*
// This omp construct is completely eliminated
#pragma omp parallel for
for (i=0; i < N-1; i++)
b[i+1] = a[i+1] + 2*i;
// These nested omp constructs are completely eliminated
#pragma omp parallel
{
#pragma omp for
for (i=0; i < N-1; i++)
b[i+1] = a[i+1] + 2*i;
}
#pragma omp parallel for
for (i=0; i < N; i++) {
a[i] = 0.0;
b[i] = a[i];
b[i] = a[i] + b[i];
}
#pragma omp parallel for
for (i=0; i < N-1; i++) {
//a[i+1] = a[1+i] + 1;
a[i+2-1] = a[(-2*3) + 2*i - i + 7] + 1;
}
#pragma omp parallel for private(sum)
for (i=0; i < N; i++)
sum = sum + i;
#pragma omp parallel for
for (i=0; i < N; i++)
sum = sum + i;
*/
}
|
statistical_outlier_detection.h | /// \file
/// Maintainer: Felice Serena
///
#pragma once
#include "spatial_oracle.h"
#include <boost/log/trivial.hpp>
namespace MouseTrack {
namespace impl {
/// true, if at least one component of v1 is strictly larger than v2
template <typename Vec1, typename Vec2>
bool isLarger(const Vec1 &v1, const Vec2 &v2) {
return (v1.array() > v2.array()).any();
}
} // namespace impl
using namespace impl;
/// Based on: Towards 3D point cloud based object maps for household
/// environments, Bogdan Rusu, section 4.1
///
/// Idea: We characterize outliers based on their local neighborhood. For this
/// we calculate the mean and standard deviation of nearest neighbor distances.
///
/// Then we trim points outside mu +- alpha * sigma
///
/// k: number of nearest neighbors to take into account
///
/// mu: mean of k nearest neighbors
///
/// sigma: standard deviation of nearest neighbors
///
/// alpha: decides how much variance we want to allow
///
/// pts: #D x #P matrix
///
/// oracle: A spatial oracle ready for queries on pts.
template <typename PointList, typename Precision>
std::vector<size_t>
statisticalOutlierDetection(const PointList &pts,
const SpatialOracle<PointList, Precision> *oracle,
Precision alpha, unsigned int k) {
BOOST_LOG_TRIVIAL(trace) << "Removing outliers with alpha = " << alpha
<< " and k = " << k << " on " << pts.cols()
<< " points.";
// set a flag, whether it's an outlier
std::vector<int> outlierMap(pts.cols());
auto allNeighbors = oracle->find_closest(pts, k);
#pragma omp parallel for
for (int i = 0; i < pts.cols(); ++i) {
if (i % (1024 * 16) == 0) { // was `i % 1024 * 16`, which parses as `(i % 1024) * 16`
BOOST_LOG_TRIVIAL(trace)
<< "outlier detection: checking i: " << i << std::flush;
}
const auto &neighbors = allNeighbors[i];
if (neighbors.size() <= 1) {
// classify lonely points as outliers?
// should this happen at all?
BOOST_LOG_TRIVIAL(debug)
<< "Found lonely point " << i << ", classifying as outlier";
outlierMap[i] = 1;
continue;
}
PointList locals(pts.rows(), neighbors.size());
for (size_t n = 0; n < neighbors.size(); ++n) {
locals.col(n) = pts.col(neighbors[n]);
}
auto mean = (locals.rowwise().sum() / locals.cols()).eval();
auto diffs = locals.colwise() - mean;
auto variance =
((diffs.array() * diffs.array()).rowwise().sum()) / locals.cols();
auto stddev = alpha * variance.array().sqrt();
// p is inlier iff p in [mean - stddev, mean + stddev]
auto centered = (pts.col(i) - mean).array().abs();
if (isLarger(centered, stddev)) {
// outlier
outlierMap[i] = 1;
}
}
std::vector<size_t> outliers;
// collect outliers
for (int i = 0; i < pts.cols(); ++i) {
if (outlierMap[i] == 1) {
outliers.push_back(i);
}
}
return outliers;
}
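// Hedged usage sketch (names below are illustrative, not part of this
// header): given a D x N point matrix `pts` and a SpatialOracle built
// over it,
//
//   std::vector<size_t> outliers =
//       statisticalOutlierDetection(pts, &oracle, /*alpha=*/1.0f, /*k=*/8);
//
// returns the column indices of points whose position lies outside
// mean +/- alpha * stddev of their k-neighborhood in some coordinate.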
} // namespace MouseTrack
|
explicit_residualbased_predictorcorrector_velocity_bossak_scheme.h | /*
==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: Kazem $
* Date: $Date: 2008-07-25 14:48:17 $
* Revision: $Revision: 1.1 $
*
* ***********************************************************/
#if !defined(KRATOS_EXPLICIT_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_SCHEME )
#define KRATOS_EXPLICIT_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_SCHEME
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "custom_strategies/strategies/residualbased_predictorcorrector_velocity_bossak_scheme.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "custom_processes/explicit_dt.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
/* namespace VelocityBossakAuxiliaries
{
Matrix mMass;
Matrix mDamp;
Vector mvel;
Vector macc;
Vector maccold;
}
*/
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
This class provides the implementation of the basic tasks that are needed by the solution strategy.
It is intended to be the place for tailoring the solution strategies to problem specific tasks.
Detail class definition.
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme : public ResidualBasedPredictorCorrectorVelocityBossakScheme<TSparseSpace,TDenseSpace>
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme);
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme(double NewAlphaBossak, double MoveMeshStrategy)
:ResidualBasedPredictorCorrectorVelocityBossakScheme<TSparseSpace,TDenseSpace>(NewAlphaBossak,MoveMeshStrategy)
{
//default values for the Newmark Scheme
//mAlphaBossak = NewAlphaBossak;
//mBetaNewmark = 0.25*pow((1.00-mAlphaBossak),2);
mGamma = 0.5-NewAlphaBossak;
//mMeshVelocity = MoveMeshStrategy;
//mGammaNewmark = 1.0;
//mBetaNewmark = 0.5;
//sizing work matrices
//mMass.resize(10,10);
//mDamp.resize(10,10);
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
std::cout << "using the ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible" << std::endl;
}
/** Destructor.
*/
virtual ~ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme() {}
/*@} */
/**@name Operators
*/
/*@{ */
/**
Performing the update of the solution.
*/
//************************************************************************************************
//************************************************************************************************
void Initialize(
ModelPart& r_model_part
)
{
KRATOS_TRY
//mSchemeIsInitialized = true;
ModelPart::ElementsContainerType::iterator elem_bg = r_model_part.ElementsBegin();
int n_elems = r_model_part.Elements().size();
ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin();
int n_nodes = r_model_part.Nodes().size();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
for ( int jj=0; jj<n_elems; ++jj)
{
ModelPart::ElementsContainerType::iterator elem = elem_bg + jj;
array_1d<double,3> mass_vec = ZeroVector(3);
elem->Calculate(VELOCITY, mass_vec,CurrentProcessInfo);//write on air water and ebs_vel to calculate mass
//add velocity mass
double air_water = elem->GetValue(IS_WATER_ELEMENT);
Element::GeometryType& geom = elem->GetGeometry();
for (unsigned int i = 0; i <geom.size(); i++)
{
geom[i].FastGetSolutionStepValue(NODAL_MASS) += mass_vec[0];
if(air_water == 1.0)
geom[i].FastGetSolutionStepValue(NODAL_MAUX) += mass_vec[1];
if(air_water == 0.0)
geom[i].FastGetSolutionStepValue(NODAL_PAUX) += mass_vec[1];
}
}
#pragma omp parallel for firstprivate(n_nodes, it_begin)
for( int kkk = 0; kkk < n_nodes; kkk++)
{
ModelPart::NodesContainerType::iterator ind = it_begin+kkk;
ind->FastGetSolutionStepValue(NODAL_MASS,1 ) = ind->FastGetSolutionStepValue(NODAL_MASS );
ind->FastGetSolutionStepValue(NODAL_MAUX,1 ) = ind->FastGetSolutionStepValue(NODAL_MAUX );
ind->FastGetSolutionStepValue(NODAL_PAUX,1 ) = ind->FastGetSolutionStepValue(NODAL_PAUX );
}
KRATOS_CATCH("")
}
//************************************************************************************************
//************************************************************************************************
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);
double DeltaTime = CurrentProcessInfo[DELTA_TIME];
// double calc_dt = 1.0;
// double& DeltaTime = r_model_part.GetProcessInfo()[DELTA_TIME];
// for(typename ModelPart::ElementsContainerType::iterator elem = r_model_part.ElementsBegin(); elem != r_model_part.ElementsEnd(); elem++)
// {
// //calculate min_dt
// elem->Calculate(DELTA_TIME, calc_dt, CurrentProcessInfo);
// if(calc_dt < DeltaTime)
// DeltaTime = 0.7*calc_dt;
//
// }
// double DeltaTime = CurrentProcessInfo[DELTA_TIME];
if (DeltaTime == 0)
KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 in the Bossak Scheme ... check if the time step is created correctly for the current model part", "");
//initializing constants
(this)->ma0 = 1.0 / (mGamma * DeltaTime);
(this)->ma1 = 0.0;
(this)->ma2 = (-1 + mGamma) / mGamma;
(this)->ma3 = DeltaTime;
(this)->ma4 = pow(DeltaTime, 2)*0.5;
(this)->ma5 = 0.0;
(this)->mam = 1.0 / (mGamma * DeltaTime);
}
//***************************************************************************
//predicts the solution at the current step as
// v = vold
virtual void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dv,
TSystemVectorType& b
)
{
std::cout << "prediction" << std::endl;
KRATOS_WATCH("PREDICT of ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme");
ModelPart::NodesContainerType::iterator it_begin = rModelPart.NodesBegin();
int n_nodes = rModelPart.Nodes().size();
#pragma omp parallel for firstprivate(n_nodes, it_begin)
for( int kkk = 0; kkk < n_nodes; kkk++)
{
ModelPart::NodesContainerType::iterator itNode = it_begin+kkk;
array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
//predicting velocity
//ATTENTION::: the prediction is performed only on free nodes
array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY);
array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION);
if ((itNode->pGetDof(VELOCITY_X))->IsFree())
{
(CurrentAcceleration[0]) = OldAcceleration[0];
CurrentVelocity[0] = OldVelocity[0] + OldAcceleration[0]/(this)->ma0;
}
if (itNode->pGetDof(VELOCITY_Y)->IsFree())
{
(CurrentAcceleration[1]) = OldAcceleration[1];
CurrentVelocity[1] = OldVelocity[1] + OldAcceleration[1]/(this)->ma0;
}
if (itNode->HasDofFor(VELOCITY_Z))
{
//braces added so the Z update, like X and Y, only touches free dofs
if (itNode->pGetDof(VELOCITY_Z)->IsFree())
{
(CurrentAcceleration[2]) = OldAcceleration[2];
CurrentVelocity[2] = OldVelocity[2] + OldAcceleration[2]/(this)->ma0;
}
}
// UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
if ((this)->mMeshVelocity == 2) //Lagrangian
{
array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY) ) = itNode->FastGetSolutionStepValue(VELOCITY);
// (this)->UpdateDisplacement(CurrentDisplacement,OldDisplacement,OldVelocity,OldAcceleration,CurrentAcceleration);
CurrentDisplacement = OldDisplacement;
// CurrentDisplacement = ZeroVector(3);
}
}
std::cout << "end of prediction" << std::endl;
}
//***************************************************************************
//***************************************************************************
virtual void InitializeNonLinIteration(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin();
int n_nodes = r_model_part.Nodes().size();
#pragma omp parallel for firstprivate(n_nodes, it_begin)
for( int kkk = 0; kkk < n_nodes; kkk++)
{
ModelPart::NodesContainerType::iterator ind = it_begin+kkk;
ind->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
ind->FastGetSolutionStepValue(RHS) = ZeroVector(3);
}//end of loop over nodes
KRATOS_WATCH("inside initialize nonlinear iteration 000000000000000000000000000000000");
//loop on nodes to compute ADVPROJ CONVPROJ NODALAREA
array_1d<double,3> mass_vec = ZeroVector(3);
// double calc_dt = 0.0;
// double& existing_dt = r_model_part.GetProcessInfo()[DELTA_TIME];
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
for(typename ModelPart::ElementsContainerType::iterator elem = r_model_part.ElementsBegin(); elem != r_model_part.ElementsEnd(); elem++)
{
mass_vec = ZeroVector(3);
elem->Calculate(VELOCITY, mass_vec,CurrentProcessInfo);//write on air water and ebs_vel to calculate mass
//calculate min_dt
// elem->Calculate(DELTA_TIME, calc_dt, CurrentProcessInfo);
// if(calc_dt < existing_dt)
// existing_dt = 0.7*calc_dt;
//add velocity mass
Element::GeometryType& geom = elem->GetGeometry();
for (unsigned int i = 0; i <geom.size(); i++)
geom[i].FastGetSolutionStepValue(NODAL_MASS) += mass_vec[0];
//add neighbors mass for shell
// unsigned int nodes_num = geom.size();
// unsigned int dim = elem->GetGeometry().WorkingSpaceDimension();
//
// if(nodes_num == dim)
// {
// WeakPointerVector< Node < 3 > >& neighb = elem->GetValue(NEIGHBOUR_NODES);
//
// for (unsigned int ind = 0; ind < 3; ind++)
// if (neighb[ind].Id() != geom[ind].Id())
// neighb[ind].FastGetSolutionStepValue(NODAL_MASS) += mass_vec[0];
//
// }
}
KRATOS_WATCH("inside initialize nonlinear iteration 11111111111111111111");
KRATOS_WATCH("END OF INITIALIZE NonLinIteration");
KRATOS_CATCH("")
}
//************************************************************************************************
//************************************************************************************************
virtual void Update(
ModelPart& r_model_part,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dv,
TSystemVectorType& b
)
{
KRATOS_TRY
KRATOS_WATCH("inside update of ExplicitResidualBasedPredictorCorrectorVelocityBossakScheme");
ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin();
int n_nodes = r_model_part.Nodes().size();
//dt factor
// ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// double GammaNewmark = 0.5 - NewAlphaBossak;
// double DeltaTime = CurrentProcessInfo[DELTA_TIME];
// double time_fac = 1.0 / (mGamma * DeltaTime);
//update of Acceleration (by DOF)
for( int kkk = 0; kkk < n_nodes; kkk++)
{
ModelPart::NodesContainerType::iterator ind = it_begin+kkk;
//get velocity
array_1d<double,3>& Acce= ind->FastGetSolutionStepValue(ACCELERATION);
Acce = ZeroVector(3);
//get mass
double vel_mass = ind->FastGetSolutionStepValue(NODAL_MASS);
//get RHS
const array_1d<double,3> rhs_vel = ind->FastGetSolutionStepValue(RHS);//define template dim
// vel_mass *= time_fac;
// KRATOS_WATCH(rhs_water_p);
// KRATOS_WATCH(rhs_water_p/water_p_mass);
//update velocity
if( (ind->pGetDof(VELOCITY_X))->IsFixed() == false )
Acce[0] = rhs_vel[0]/vel_mass;
if( (ind->pGetDof(VELOCITY_Y))->IsFixed() == false )
Acce[1] = rhs_vel[1]/vel_mass;
if( ind->HasDofFor(VELOCITY_Z))
if( ind->pGetDof(VELOCITY_Z)->IsFixed() == false )
Acce[2] = rhs_vel[2]/vel_mass;
//updating time derivatives (nodally for efficiency)
//array_1d<double,3> DeltaVel;
//double DeltaWaterPressure = 0.0;
//double DeltaAirPressure = 0.0;
// if(i->FastGetSolutionStepValue(AIR_PRESSURE) < 160.0)
// {
// i->FastGetSolutionStepValue(AIR_PRESSURE) = 160.0;//considering min ro = .01
// }
// if(i->FastGetSolutionStepValue(WATER_PRESSURE) < 600000.0)//this is considering that min density of water is 997 (996.69 is w pressure zero)
// {
// i->FastGetSolutionStepValue(WATER_PRESSURE) = 600000.0;//considering min ro = .01
// }
// noalias(DeltaVel) = (ind)->FastGetSolutionStepValue(VELOCITY) - (i)->FastGetSolutionStepValue(VELOCITY,1);
array_1d<double,3>& CurrentDisplacement = (ind)->FastGetSolutionStepValue(DISPLACEMENT,0);
array_1d<double,3>& OldDisplacement = (ind)->FastGetSolutionStepValue(DISPLACEMENT,1);
array_1d<double,3>& OldAcceleration = (ind)->FastGetSolutionStepValue(ACCELERATION,1);
array_1d<double,3>& CurrentVelocity = (ind)->FastGetSolutionStepValue(VELOCITY,0);
array_1d<double,3>& OldVelocity = (ind)->FastGetSolutionStepValue(VELOCITY,1);
UpdateVelocity(Acce,OldAcceleration,CurrentVelocity,OldVelocity);
//to not move nodes with fixed flag
if(ind->IsFixed(DISPLACEMENT_X)) CurrentDisplacement[0] = 0.0;
if(ind->IsFixed(DISPLACEMENT_Y)) CurrentDisplacement[1] = 0.0;
if(ind->IsFixed(DISPLACEMENT_Z)) CurrentDisplacement[2] = 0.0;
ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = 0.0;
if(this->mMeshVelocity == 0.0)//Eulerian
{
ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = 0.0;
}
if(this->mMeshVelocity == 1.0)
{
ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = ind->FastGetSolutionStepValue(VELOCITY_X,1);
ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = ind->FastGetSolutionStepValue(VELOCITY_Y,1);
ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = ind->FastGetSolutionStepValue(VELOCITY_Z,1);
}
if(this->mMeshVelocity == 2.0)//Lagrangian
{
ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = ind->FastGetSolutionStepValue(VELOCITY_X);
ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = ind->FastGetSolutionStepValue(VELOCITY_Y);
ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = ind->FastGetSolutionStepValue(VELOCITY_Z);
(this)->UpdateDisplacement(CurrentDisplacement,OldDisplacement,OldVelocity,OldAcceleration,Acce);
}
}
KRATOS_CATCH("")
}
//******************************************************************************************
//******************************************************************************************
void Calculate_RHS_Contribution(
Element::Pointer rCurrentElement,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
int k = OpenMPUtils::ThisThread();
//Initializing the non linear iteration for the current element
(rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);
//basic operations for the element considered
(rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
(rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
(rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
(rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);
//adding the dynamic contributions (static is already included)
AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
KRATOS_CATCH("")
}
//************************************************************************************************
//************************************************************************************************
void Condition_Calculate_RHS_Contribution(
Condition::Pointer rCurrentCondition,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
int k = OpenMPUtils::ThisThread();
(rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo);
//basic operations for the element considered
(rCurrentCondition)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
(rCurrentCondition)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
//(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo);
(rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
(rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo);
//adding the dynamic contributions (static is already included)
AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
KRATOS_CATCH("")
}
//************************************************************************************************
//************************************************************************************************
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
//************************************************************************************************
//************************************************************************************************
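// Newmark/Bossak velocity update: with ma0 = 1/(gamma*dt) and
// ma2 = (gamma - 1)/gamma (set in InitializeSolutionStep), the formula
// below is algebraically
//   v_{n+1} = v_n + dt*((1 - gamma)*a_n + gamma*a_{n+1}).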
void UpdateVelocity(const array_1d<double, 3 > & CurrentAcceleration,
const array_1d<double, 3 > & OldAcceleration, array_1d<double, 3 > & CurrentVelocity,const array_1d<double, 3 > & OldVelocity)
{
noalias(CurrentVelocity) = OldVelocity + (CurrentAcceleration - (this)->ma2*OldAcceleration)/(this)->ma0;
}
void UpdatePressure(const double& CurrentPressureRate,
const double& OldPressureRate, double& CurrentPressure,const double& OldPressure)
{
CurrentPressure = OldPressure + (CurrentPressureRate - (this)->ma2*OldPressureRate)/(this)->ma0;
}
//****************************************************************************
/**
bdyn = b - D*vel
*/
void AddDynamicsToRHS(
Element::Pointer rCurrentElement,
LocalSystemVectorType& RHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
// KRATOS_WATCH(RHS_Contribution);
// if (M.size1() != 0) {
// rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::macc, 0);
// (VelocityBossakAuxiliaries::macc) *= (1.00 - mAlphaBossak);
// rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::maccold, 1);
// noalias(VelocityBossakAuxiliaries::macc) += mAlphaBossak * VelocityBossakAuxiliaries::maccold;
// noalias(RHS_Contribution) -= prod(M, VelocityBossakAuxiliaries::macc);
// }
// KRATOS_WATCH(RHS_Contribution);
//adding damping contribution
//damping contribution
// if (D.size1() != 0) {
// rCurrentElement->GetFirstDerivativesVector(VelocityBossakAuxiliaries::mvel, 0);
// noalias(RHS_Contribution) -= prod(D, VelocityBossakAuxiliaries::mvel);
// }
// KRATOS_WATCH("Empty AddDynamicsToRHS ELEMENt");
KRATOS_CATCH("")
}
void AddDynamicsToRHS(
Condition::Pointer rCurrentElement,
LocalSystemVectorType& RHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
ProcessInfo& CurrentProcessInfo)
{
//adding inertia contribution
// if (M.size1() != 0) {
// rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::macc, 0);
// (VelocityBossakAuxiliaries::macc) *= (1.00 - mAlphaBossak);
// rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::maccold, 1);
// noalias(VelocityBossakAuxiliaries::macc) += mAlphaBossak * VelocityBossakAuxiliaries::maccold;
//
// noalias(RHS_Contribution) -= prod(M, VelocityBossakAuxiliaries::macc);
// }
//adding damping contribution
//damping contribution
// if (D.size1() != 0) {
// rCurrentElement->GetFirstDerivativesVector(VelocityBossakAuxiliaries::mvel, 0);
// noalias(RHS_Contribution) -= prod(D, VelocityBossakAuxiliaries::mvel);
// }
// KRATOS_WATCH("Empty AddDynamicsToRHS CONDITION");
}
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
std::vector< Matrix >mMass;
std::vector< Matrix >mDamp;
std::vector< Vector >mvel;
std::vector< Vector >macc;
std::vector< Vector >maccold;
double mGamma;
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class Scheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME_COMPRESSIBLE defined */
|
adjvectorbqm.h | // Copyright 2020 D-Wave Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DIMOD_ADJVECTORBQM_H_
#define DIMOD_ADJVECTORBQM_H_
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "dimod/utils.h"
namespace dimod {
template <class V, class B>
class AdjVectorBQM {
public:
using bias_type = B;
using variable_type = V;
using size_type = std::size_t;
using outvars_iterator = typename std::vector<std::pair<V, B>>::iterator;
using const_outvars_iterator =
typename std::vector<std::pair<V, B>>::const_iterator;
// in the future we'd probably like to make this protected
std::vector<std::pair<std::vector<std::pair<V, B>>, B>> adj;
AdjVectorBQM() {}
template <class BQM>
explicit AdjVectorBQM(const BQM &bqm) {
adj.resize(bqm.num_variables());
for (variable_type v = 0; v < bqm.num_variables(); ++v) {
linear(v) = bqm.linear(v);
auto span = bqm.neighborhood(v);
adj[v].first.insert(adj[v].first.begin(), span.first, span.second);
}
}
/**
* Construct a BQM from a dense array.
*
* @param dense An array containing the biases. Assumed to contain
* `num_variables`^2 elements. The upper and lower triangle are summed.
* @param num_variables The number of variables.
*/
template <class B2>
AdjVectorBQM(const B2 dense[], size_type num_variables,
bool ignore_diagonal = false) {
// we know how big our linear is going to be
adj.resize(num_variables);
bias_type qbias;
if (!ignore_diagonal) {
for (size_type v = 0; v < num_variables; ++v) {
adj[v].second = dense[v * (num_variables + 1)];
}
}
for (size_type u = 0; u < num_variables; ++u) {
for (size_type v = u + 1; v < num_variables; ++v) {
qbias = dense[u * num_variables + v] +
dense[v * num_variables + u];
if (qbias != 0) {
adj[u].first.emplace_back(v, qbias);
adj[v].first.emplace_back(u, qbias);
}
}
}
}
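// A minimal usage sketch (hypothetical values): the diagonal supplies the
// linear biases and the two symmetric off-diagonal entries are summed into a
// single quadratic bias.
//
//   double dense[] = {-1.0, 0.5,
//                      0.5, 1.0};
//   AdjVectorBQM<int, double> bqm(dense, 2);
//   // bqm.linear(0) == -1.0; bqm.get_quadratic(0, 1) == {1.0, true}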
/**
* Construct a BQM from a dense array. This constructor is parallelized;
* it temporarily zeroes out the diagonal of the dense array and restores
* it afterwards.
*
* @param dense An array containing the biases. Assumed to contain
* `num_variables`^2 elements. The upper and lower triangle are summed.
* @param num_variables The number of variables.
*/
template <class B2>
AdjVectorBQM(B2 dense[], size_type num_variables,
bool ignore_diagonal = false) {
// we know how big our linear is going to be
adj.resize(num_variables);
// Backup copy of the diagonal of the dense matrix.
std::vector<B2> dense_diagonal(num_variables);
if (!ignore_diagonal) {
#pragma omp parallel for
for (size_type v = 0; v < num_variables; ++v) {
adj[v].second = dense[v * (num_variables + 1)];
}
}
#pragma omp parallel
{
// Zero out the diagonal to avoid expensive checks inside innermost
// loop in the code for reading the matrix. The diagonal will be
// restored so a backup copy is saved.
#pragma omp for schedule(static)
for (size_type v = 0; v < num_variables; ++v) {
dense_diagonal[v] = dense[v * (num_variables + 1)];
dense[v * (num_variables + 1)] = 0;
}
size_type counters[BLOCK_SIZE] = {0};
size_type buffer_size = num_variables * BLOCK_SIZE *
sizeof(std::pair<variable_type, bias_type>);
std::pair<variable_type, bias_type> *temp_buffer =
(std::pair<variable_type, bias_type> *)malloc(buffer_size);
if (temp_buffer == NULL) {
fprintf(stderr, "Memory allocation failure.\n");
exit(EXIT_FAILURE);
}
// We process the matrix in blocks of size BLOCK_SIZE*BLOCK_SIZE to take
// advantage of cache locality. Dynamic scheduling is used as we know some
// blocks may be more sparse than others and processing them may finish earlier.
#pragma omp for schedule(dynamic)
for (size_type u_st = 0; u_st < num_variables; u_st += BLOCK_SIZE) {
size_type u_end = std::min(u_st + BLOCK_SIZE, num_variables);
for (size_type v_st = 0; v_st < num_variables;
v_st += BLOCK_SIZE) {
size_type v_end =
std::min(v_st + BLOCK_SIZE, num_variables);
for (size_type u = u_st, n = 0; u < u_end; u++, n++) {
size_type counter_u = counters[n];
size_type counter_u_old = counter_u;
for (size_type v = v_st; v < v_end; v++) {
bias_type qbias = dense[u * num_variables + v] +
dense[v * num_variables + u];
if (qbias != 0) {
temp_buffer[n * num_variables + counter_u++] = {
v, qbias};
}
}
if (counter_u != counter_u_old) {
counters[n] = counter_u;
}
}
}
for (size_type n = 0; n < BLOCK_SIZE; n++) {
if (counters[n]) {
adj[u_st + n].first.assign(
temp_buffer + n * num_variables,
temp_buffer + n * num_variables + counters[n]);
counters[n] = 0;
}
}
}
free(temp_buffer);
// Restore the diagonal of the original dense matrix
#pragma omp for schedule(static)
for (size_type v = 0; v < num_variables; ++v) {
dense[v * (num_variables + 1)] = dense_diagonal[v];
}
}
}
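// Note: unlike the const overload above, this constructor mutates `dense`
// (the diagonal is zeroed and later restored), so the array must not be read
// by other threads while construction runs. BLOCK_SIZE is assumed to be
// defined by an included header such as dimod/utils.h.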
/**
* Construct a BQM from COO-formatted iterators.
*
* A sparse BQM encoded in [COOrdinate] format is specified by three
* arrays of (row, column, value).
*
* [COOrdinate]: https://w.wiki/n$L
*
* @param row_iterator Iterator pointing to the beginning of the row data.
* Must be a random access iterator.
* @param col_iterator Iterator pointing to the beginning of the column
* data. Must be a random access iterator.
* @param bias_iterator Iterator pointing to the beginning of the bias data.
* Must be a random access iterator.
* @param length The number of (row, column, bias) entries.
* @param ignore_diagonal If true, entries on the diagonal of the sparse
* matrix are ignored.
*/
template <class ItRow, class ItCol, class ItBias>
AdjVectorBQM(ItRow row_iterator, ItCol col_iterator, ItBias bias_iterator,
size_type length, bool ignore_diagonal = false) {
// determine the number of variables so we can allocate adj
if (length > 0) {
size_type max_label = std::max(
*std::max_element(row_iterator, row_iterator + length),
*std::max_element(col_iterator, col_iterator + length));
adj.resize(max_label + 1);
}
// Count the degrees and use that to reserve the neighborhood vectors
std::vector<size_type> degrees(adj.size());
ItRow rit(row_iterator);
ItCol cit(col_iterator);
for (size_type i = 0; i < length; ++i, ++rit, ++cit) {
if (*rit != *cit) {
degrees[*rit] += 1;
degrees[*cit] += 1;
}
}
for (size_type i = 0; i < degrees.size(); ++i) {
adj[i].first.reserve(degrees[i]);
}
// add the values to the adjacency, not worrying about order or
// duplicates
for (size_type i = 0; i < length; i++) {
if (*row_iterator == *col_iterator) {
// linear bias
if (!ignore_diagonal) {
linear(*row_iterator) += *bias_iterator;
}
} else {
// quadratic bias
adj[*row_iterator].first.emplace_back(*col_iterator,
*bias_iterator);
adj[*col_iterator].first.emplace_back(*row_iterator,
*bias_iterator);
}
++row_iterator;
++col_iterator;
++bias_iterator;
}
normalize_neighborhood();
}
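// A minimal usage sketch (hypothetical arrays): a diagonal entry becomes a
// linear bias, an off-diagonal entry becomes a quadratic bias.
//
//   int row[] = {0, 0};
//   int col[] = {0, 1};
//   double bias[] = {-1.0, 0.5};
//   AdjVectorBQM<int, double> bqm(row, col, bias, 2);
//   // bqm.linear(0) == -1.0; bqm.get_quadratic(0, 1) == {0.5, true}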
/// Add one (disconnected) variable to the BQM and return its index.
variable_type add_variable() {
adj.resize(adj.size() + 1);
return adj.size() - 1;
}
/// Get the degree of variable `v`.
size_type degree(variable_type v) const { return adj[v].first.size(); }
[[deprecated("Use AdjVectorBQM::linear(v)")]] bias_type get_linear(
variable_type v) const { return linear(v); }
std::pair<bias_type, bool> get_quadratic(variable_type u,
variable_type v) const {
assert(u >= 0 && u < adj.size());
assert(v >= 0 && v < adj.size());
assert(u != v);
auto span = neighborhood(u);
auto low = std::lower_bound(span.first, span.second, v,
utils::comp_v<V, B>);
if (low == span.second || low->first != v)
return std::make_pair(0, false);
return std::make_pair(low->second, true);
}
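// get_quadratic() returns the bias together with a flag indicating whether
// the interaction exists, e.g. (hypothetical):
//
//   auto qb = bqm.get_quadratic(0, 1);
//   if (qb.second) { /* interaction present; bias is qb.first */ }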
bias_type &linear(variable_type v) {
assert(v >= 0 && v < adj.size());
return adj[v].second;
}
const bias_type &linear(variable_type v) const {
assert(v >= 0 && v < adj.size());
return adj[v].second;
}
std::pair<outvars_iterator, outvars_iterator> neighborhood(
variable_type u) {
assert(u >= 0 && u < adj.size());
return std::make_pair(adj[u].first.begin(), adj[u].first.end());
}
std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
variable_type u) const {
assert(u >= 0 && u < adj.size());
return std::make_pair(adj[u].first.cbegin(), adj[u].first.cend());
}
/**
* The neighborhood of variable `v`.
*
* @param v The variable whose neighborhood is returned.
* @param start The neighborhood will start with the first out variable
* that does not compare less than `start`.
*
* @returns A pair of iterators pointing to the start and end of the
* neighborhood.
*/
std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
variable_type v, variable_type start) const {
auto span = neighborhood(v);
auto low = std::lower_bound(span.first, span.second, start,
utils::comp_v<V, B>);
return std::make_pair(low, span.second);
}
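// For example, to visit only the neighbors of `v` with labels that do not
// compare less than 3 (hypothetical):
//
//   auto span = bqm.neighborhood(v, 3);
//   for (auto it = span.first; it != span.second; ++it)
//       { /* neighbor is it->first, bias is it->second */ }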
/// Sort each neighborhood and merge duplicate interactions, summing their biases.
void normalize_neighborhood() {
for (variable_type v = 0; v < adj.size(); ++v) {
auto span = neighborhood(v);
if (!std::is_sorted(span.first, span.second)) {
std::sort(span.first, span.second);
}
// now merge any duplicate variables, adding the biases
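// e.g. [(2, 0.5), (2, 0.25), (3, 1.0)] becomes [(2, 0.75), (3, 1.0)]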
auto it = adj[v].first.begin();
while (it + 1 < adj[v].first.end()) {
if (it->first == (it + 1)->first) {
it->second += (it + 1)->second;
adj[v].first.erase(it + 1);
} else {
++it;
}
}
}
}
template<class Iter>
void normalize_neighborhood(Iter begin, Iter end) {
while (begin != end) {
auto v = *begin;
auto span = neighborhood(v);
if (!std::is_sorted(span.first, span.second)) {
std::sort(span.first, span.second);
}
// now merge any duplicate variables, adding the biases
auto it = adj[v].first.begin();
while (it + 1 < adj[v].first.end()) {
if (it->first == (it + 1)->first) {
it->second += (it + 1)->second;
adj[v].first.erase(it + 1);
} else {
++it;
}
}
++begin;
}
}
size_type num_variables() const { return adj.size(); }
size_type num_interactions() const {
size_type count = 0;
for (auto it = adj.begin(); it != adj.end(); ++it)
count += it->first.size();
return count / 2;
}
variable_type pop_variable() {
assert(adj.size() > 0);
variable_type v = adj.size() - 1;
// remove v from all of its neighbor's neighborhoods
for (auto it = adj[v].first.cbegin(); it != adj[v].first.cend(); ++it) {
auto span = neighborhood(it->first);
auto low = std::lower_bound(span.first, span.second, v,
utils::comp_v<V, B>);
adj[it->first].first.erase(low);
}
adj.pop_back();
return adj.size();
}
bool remove_interaction(variable_type u, variable_type v) {
assert(u >= 0 && u < adj.size());
assert(v >= 0 && v < adj.size());
auto span = neighborhood(u);
auto low = std::lower_bound(span.first, span.second, v,
utils::comp_v<V, B>);
bool exists = !(low == span.second || low->first != v);
if (exists) {
adj[u].first.erase(low);
span = neighborhood(v);
low = std::lower_bound(span.first, span.second, u,
utils::comp_v<V, B>);
assert(!(low == span.second || low->first != u) == exists);
adj[v].first.erase(low);
}
return exists;
}
[[deprecated("Use AdjVectorBQM::linear(v)")]] void set_linear(
variable_type v, bias_type b) {
assert(v >= 0 && v < adj.size());
linear(v) = b;
}
bool set_quadratic(variable_type u, variable_type v, bias_type b) {
assert(u >= 0 && u < adj.size());
assert(v >= 0 && v < adj.size());
assert(u != v);
auto span = neighborhood(u);
auto low = std::lower_bound(span.first, span.second, v,
utils::comp_v<V, B>);
bool exists = !(low == span.second || low->first != v);
if (exists) {
low->second = b;
} else {
adj[u].first.emplace(low, v, b);
}
span = neighborhood(v);
low = std::lower_bound(span.first, span.second, u, utils::comp_v<V, B>);
assert(!(low == span.second || low->first != u) == exists);
if (exists) {
low->second = b;
} else {
adj[v].first.emplace(low, u, b);
}
// to be consistent with AdjArrayBQM, we return whether the value was
// set
return true;
}
};
} // namespace dimod
#endif // DIMOD_ADJVECTORBQM_H_
|
queue.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/queue.h
* @brief Lock-free double-ended queue.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUEUE_H
#define _GLIBCXX_PARALLEL_QUEUE_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#include <parallel/compatibility.h>
/** @brief Decide whether to declare certain variables volatile in this file. */
#define _GLIBCXX_VOLATILE volatile
namespace __gnu_parallel
{
/**@brief Double-ended queue of bounded size, allowing lock-free
* atomic access. push_front() and pop_front() must not be called
* concurrently with each other, while pop_back() can be called
* concurrently at all times.
* @c empty(), @c size(), and @c top() are intentionally not provided.
* Calling them would not make sense in a concurrent setting.
* @param _Tp Contained element type. */
template<typename _Tp>
class _RestrictedBoundedConcurrentQueue
{
private:
/** @brief Array of elements, seen as cyclic buffer. */
_Tp* _M_base;
/** @brief Maximal number of elements contained at the same time. */
_SequenceIndex _M_max_size;
/** @brief Cyclic __begin and __end pointers contained in one
atomically changeable value. */
_GLIBCXX_VOLATILE _CASable _M_borders;
public:
/** @brief Constructor. Not to be called concurrently, of course.
* @param __max_size Maximal number of elements to be contained. */
_RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
{
_M_max_size = __max_size;
_M_base = new _Tp[__max_size];
_M_borders = __encode2(0, 0);
#pragma omp flush
}
/** @brief Destructor. Not to be called concurrently, of course. */
~_RestrictedBoundedConcurrentQueue()
{ delete[] _M_base; }
/** @brief Pushes one element into the queue at the front end.
* Must not be called concurrently with pop_front(). */
void
push_front(const _Tp& __t)
{
_CASable __former_borders = _M_borders;
int __former_front, __former_back;
__decode2(__former_borders, __former_front, __former_back);
*(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_ASSERTIONS
// Otherwise: front - back > _M_max_size eventually.
_GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
<= _M_max_size);
#endif
__fetch_and_add(&_M_borders, __encode2(1, 0));
}
/** @brief Pops one element from the queue at the front end.
* Must not be called concurrently with push_front(). */
bool
pop_front(_Tp& __t)
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// There is an element available; try to claim it via compare-and-swap.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front - 1,
__former_back);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + (__former_front - 1) % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
/** @brief Pops one element from the queue at the back end.
* May be called concurrently with push_front() and pop_front(). */
bool
pop_back(_Tp& __t) //queue behavior
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// There is an element available; try to claim it via compare-and-swap.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front,
__former_back + 1);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + __former_back % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
};
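// A minimal single-threaded usage sketch (hypothetical; in the intended
// concurrent setting the owning thread calls push_front()/pop_front() while
// other threads may call pop_back() at any time):
//
//   _RestrictedBoundedConcurrentQueue<int> __queue(128);
//   __queue.push_front(42);
//   int __value;
//   if (__queue.pop_back(__value))
//     { /* __value == 42 */ }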
} //namespace __gnu_parallel
#undef _GLIBCXX_VOLATILE
#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
|
main.c | //==============================================================================
//==============================================================================
// DEFINE / INCLUDE
//==============================================================================
//==============================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <avilib.h>
#include <avimod.h>
#include <omp.h>
#include "define.c"
#include "kernel.c"
//==============================================================================
// WRITE DATA FUNCTION
//==============================================================================
#ifdef OUTPUT
static void write_data(char* filename, int frameNo, int frames_processed,
int endoPoints, int* input_a, int* input_b, int epiPoints,
int* input_2a, int* input_2b) {
//================================================================================
// VARIABLES
//================================================================================
FILE* fid;
int i,j;
//================================================================================
// OPEN FILE FOR WRITING
//================================================================================
fid = fopen(filename, "w+");
if( fid == NULL ) {
printf( "The file was not opened for writing\n" );
return;
}
//================================================================================
// WRITE VALUES TO THE FILE
//================================================================================
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for(j=0; j<frames_processed;j++) {
fprintf(fid, "\n---Frame %d---",j);
fprintf(fid, "\n--endo--\n");
for(i=0; i<endoPoints; i++) {
fprintf(fid, "%d\t", input_a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<endoPoints; i++) {
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j+i*frameNo]);
}
fprintf(fid, "\n--epi--\n");
for(i=0; i<epiPoints; i++) {
//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<epiPoints; i++) {
//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
}
}
//================================================================================
// CLOSE FILE
//================================================================================
fclose(fid);
}
#endif
static void init_public_and_private_struct(public_struct *public, private_struct *private) {
//====================================================================================================
// ENDO POINTS
//====================================================================================================
public->endoPoints = ENDO_POINTS;
public->d_endo_mem = sizeof(int) * public->endoPoints;
public->d_endoRow = (int *)malloc(public->d_endo_mem);
public->d_endoRow[ 0] = 369;
public->d_endoRow[ 1] = 400;
public->d_endoRow[ 2] = 429;
public->d_endoRow[ 3] = 452;
public->d_endoRow[ 4] = 476;
public->d_endoRow[ 5] = 486;
public->d_endoRow[ 6] = 479;
public->d_endoRow[ 7] = 458;
public->d_endoRow[ 8] = 433;
public->d_endoRow[ 9] = 404;
public->d_endoRow[10] = 374;
public->d_endoRow[11] = 346;
public->d_endoRow[12] = 318;
public->d_endoRow[13] = 294;
public->d_endoRow[14] = 277;
public->d_endoRow[15] = 269;
public->d_endoRow[16] = 275;
public->d_endoRow[17] = 287;
public->d_endoRow[18] = 311;
public->d_endoRow[19] = 339;
public->d_endoCol = (int *)malloc(public->d_endo_mem);
public->d_endoCol[ 0] = 408;
public->d_endoCol[ 1] = 406;
public->d_endoCol[ 2] = 397;
public->d_endoCol[ 3] = 383;
public->d_endoCol[ 4] = 354;
public->d_endoCol[ 5] = 322;
public->d_endoCol[ 6] = 294;
public->d_endoCol[ 7] = 270;
public->d_endoCol[ 8] = 250;
public->d_endoCol[ 9] = 237;
public->d_endoCol[10] = 235;
public->d_endoCol[11] = 241;
public->d_endoCol[12] = 254;
public->d_endoCol[13] = 273;
public->d_endoCol[14] = 300;
public->d_endoCol[15] = 328;
public->d_endoCol[16] = 356;
public->d_endoCol[17] = 383;
public->d_endoCol[18] = 401;
public->d_endoCol[19] = 411;
public->d_tEndoRowLoc = (int *)malloc(public->d_endo_mem * public->frames);
public->d_tEndoColLoc = (int *)malloc(public->d_endo_mem * public->frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
public->epiPoints = EPI_POINTS;
public->d_epi_mem = sizeof(int) * public->epiPoints;
public->d_epiRow = (int *)malloc(public->d_epi_mem);
public->d_epiRow[ 0] = 390;
public->d_epiRow[ 1] = 419;
public->d_epiRow[ 2] = 448;
public->d_epiRow[ 3] = 474;
public->d_epiRow[ 4] = 501;
public->d_epiRow[ 5] = 519;
public->d_epiRow[ 6] = 535;
public->d_epiRow[ 7] = 542;
public->d_epiRow[ 8] = 543;
public->d_epiRow[ 9] = 538;
public->d_epiRow[10] = 528;
public->d_epiRow[11] = 511;
public->d_epiRow[12] = 491;
public->d_epiRow[13] = 466;
public->d_epiRow[14] = 438;
public->d_epiRow[15] = 406;
public->d_epiRow[16] = 376;
public->d_epiRow[17] = 347;
public->d_epiRow[18] = 318;
public->d_epiRow[19] = 291;
public->d_epiRow[20] = 275;
public->d_epiRow[21] = 259;
public->d_epiRow[22] = 256;
public->d_epiRow[23] = 252;
public->d_epiRow[24] = 252;
public->d_epiRow[25] = 257;
public->d_epiRow[26] = 266;
public->d_epiRow[27] = 283;
public->d_epiRow[28] = 305;
public->d_epiRow[29] = 331;
public->d_epiRow[30] = 360;
public->d_epiCol = (int *)malloc(public->d_epi_mem);
public->d_epiCol[ 0] = 457;
public->d_epiCol[ 1] = 454;
public->d_epiCol[ 2] = 446;
public->d_epiCol[ 3] = 431;
public->d_epiCol[ 4] = 411;
public->d_epiCol[ 5] = 388;
public->d_epiCol[ 6] = 361;
public->d_epiCol[ 7] = 331;
public->d_epiCol[ 8] = 301;
public->d_epiCol[ 9] = 273;
public->d_epiCol[10] = 243;
public->d_epiCol[11] = 218;
public->d_epiCol[12] = 196;
public->d_epiCol[13] = 178;
public->d_epiCol[14] = 166;
public->d_epiCol[15] = 157;
public->d_epiCol[16] = 155;
public->d_epiCol[17] = 165;
public->d_epiCol[18] = 177;
public->d_epiCol[19] = 197;
public->d_epiCol[20] = 218;
public->d_epiCol[21] = 248;
public->d_epiCol[22] = 276;
public->d_epiCol[23] = 304;
public->d_epiCol[24] = 333;
public->d_epiCol[25] = 361;
public->d_epiCol[26] = 391;
public->d_epiCol[27] = 415;
public->d_epiCol[28] = 434;
public->d_epiCol[29] = 448;
public->d_epiCol[30] = 455;
public->d_tEpiRowLoc = (int *)malloc(public->d_epi_mem * public->frames);
public->d_tEpiColLoc = (int *)malloc(public->d_epi_mem * public->frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
public->allPoints = ALL_POINTS;
//=====================
// CONSTANTS
//=====================
public->tSize = 25;
public->sSize = 40;
public->maxMove = 10;
public->alpha = 0.87;
//=====================
// SUMS
//=====================
for(int i=0; i<public->allPoints; i++) {
private[i].in_partial_sum = (fp *)malloc(sizeof(fp) * 2*public->tSize+1);
private[i].in_sqr_partial_sum = (fp *)malloc(sizeof(fp) * 2*public->tSize+1);
private[i].par_max_val = (fp *)malloc(sizeof(fp) * (2*public->tSize+2*public->sSize+1));
private[i].par_max_coo = (int *)malloc(sizeof(int) * (2*public->tSize+2*public->sSize+1));
}
//=====================
// INPUT 2 (SAMPLE AROUND POINT)
//=====================
public->in2_rows = 2 * public->sSize + 1;
public->in2_cols = 2 * public->sSize + 1;
public->in2_elem = public->in2_rows * public->in2_cols;
public->in2_mem = sizeof(fp) * public->in2_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_in2 = (fp *)malloc(public->in2_mem);
private[i].d_in2_sqr = (fp *)malloc(public->in2_mem);
}
//=====================
// INPUT (POINT TEMPLATE)
//=====================
public->in_mod_rows = public->tSize+1+public->tSize;
public->in_mod_cols = public->in_mod_rows;
public->in_mod_elem = public->in_mod_rows * public->in_mod_cols;
public->in_mod_mem = sizeof(fp) * public->in_mod_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_in_mod = (fp *)malloc(public->in_mod_mem);
private[i].d_in_sqr = (fp *)malloc(public->in_mod_mem);
}
//=====================
// ARRAY OF TEMPLATES FOR ALL POINTS
//=====================
public->d_endoT = (fp *)malloc(public->in_mod_mem * public->endoPoints);
public->d_epiT = (fp *)malloc(public->in_mod_mem * public->epiPoints);
//=====================
// SETUP private POINTERS TO ROWS, COLS AND TEMPLATE
//=====================
for(int i=0; i< public->endoPoints; i++) {
private[i].point_no = i;
private[i].in_pointer = private[i].point_no * public->in_mod_elem;
private[i].d_Row = public->d_endoRow; // original row coordinates
private[i].d_Col = public->d_endoCol; // original col coordinates
private[i].d_tRowLoc = public->d_tEndoRowLoc; // updated row coordinates
private[i].d_tColLoc = public->d_tEndoColLoc; // updated col coordinates
private[i].d_T = public->d_endoT; // templates
}
for(int i = public->endoPoints; i < public->allPoints; i++) {
private[i].point_no = i-public->endoPoints;
private[i].in_pointer = private[i].point_no * public->in_mod_elem;
private[i].d_Row = public->d_epiRow;
private[i].d_Col = public->d_epiCol;
private[i].d_tRowLoc = public->d_tEpiRowLoc;
private[i].d_tColLoc = public->d_tEpiColLoc;
private[i].d_T = public->d_epiT;
}
//=====================
// CONVOLUTION
//=====================
public->ioffset = 0;
public->joffset = 0;
public->conv_rows = public->in_mod_rows + public->in2_rows - 1; // number of rows in I
public->conv_cols = public->in_mod_cols + public->in2_cols - 1; // number of columns in I
public->conv_elem = public->conv_rows * public->conv_cols; // number of elements
public->conv_mem = sizeof(fp) * public->conv_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_conv = (fp *)malloc(public->conv_mem);
}
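// Worked example with the constants above: tSize = 25 gives a 51x51 template
// (in_mod_rows = 25+1+25) and sSize = 40 gives an 81x81 sample window
// (in2_rows = 2*40+1), so the full 2-D convolution result has
// (51+81-1) x (51+81-1) = 131 x 131 elements.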
//=====================
// CUMULATIVE SUM
//=====================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
public->in2_pad_add_rows = public->in_mod_rows;
public->in2_pad_add_cols = public->in_mod_cols;
public->in2_pad_rows = public->in2_rows + 2*public->in2_pad_add_rows;
public->in2_pad_cols = public->in2_cols + 2*public->in2_pad_add_cols;
public->in2_pad_elem = public->in2_pad_rows * public->in2_pad_cols;
public->in2_pad_mem = sizeof(fp) * public->in2_pad_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_in2_pad = (fp *)malloc(public->in2_pad_mem);
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
public->in2_pad_cumv_sel_rowlow = 1 + public->in_mod_rows; // (1 to n+1)
public->in2_pad_cumv_sel_rowhig = public->in2_pad_rows - 1;
public->in2_pad_cumv_sel_collow = 1;
public->in2_pad_cumv_sel_colhig = public->in2_pad_cols;
public->in2_pad_cumv_sel2_rowlow = 1;
public->in2_pad_cumv_sel2_rowhig = public->in2_pad_rows - public->in_mod_rows - 1;
public->in2_pad_cumv_sel2_collow = 1;
public->in2_pad_cumv_sel2_colhig = public->in2_pad_cols;
public->in2_sub_rows = public->in2_pad_cumv_sel_rowhig - public->in2_pad_cumv_sel_rowlow + 1;
public->in2_sub_cols = public->in2_pad_cumv_sel_colhig - public->in2_pad_cumv_sel_collow + 1;
public->in2_sub_elem = public->in2_sub_rows * public->in2_sub_cols;
public->in2_sub_mem = sizeof(fp) * public->in2_sub_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_in2_sub = (fp *)malloc(public->in2_sub_mem);
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, SQUARE, NUMERATOR
//====================================================================================================
public->in2_sub_cumh_sel_rowlow = 1;
public->in2_sub_cumh_sel_rowhig = public->in2_sub_rows;
public->in2_sub_cumh_sel_collow = 1 + public->in_mod_cols;
public->in2_sub_cumh_sel_colhig = public->in2_sub_cols - 1;
public->in2_sub_cumh_sel2_rowlow = 1;
public->in2_sub_cumh_sel2_rowhig = public->in2_sub_rows;
public->in2_sub_cumh_sel2_collow = 1;
public->in2_sub_cumh_sel2_colhig = public->in2_sub_cols - public->in_mod_cols - 1;
public->in2_sub2_sqr_rows = public->in2_sub_cumh_sel_rowhig - public->in2_sub_cumh_sel_rowlow + 1;
public->in2_sub2_sqr_cols = public->in2_sub_cumh_sel_colhig - public->in2_sub_cumh_sel_collow + 1;
public->in2_sub2_sqr_elem = public->in2_sub2_sqr_rows * public->in2_sub2_sqr_cols;
public->in2_sub2_sqr_mem = sizeof(fp) * public->in2_sub2_sqr_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_in2_sub2_sqr = (fp *)malloc(public->in2_sub2_sqr_mem);
}
//=====================
// CUMULATIVE SUM 2
//=====================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, DIFFERENTIAL LOCAL SUM, DENOMINATOR A, DENOMINATOR, CORRELATION
//====================================================================================================
//=====================
// TEMPLATE MASK CREATE
//=====================
public->tMask_rows = public->in_mod_rows + (public->sSize+1+public->sSize) - 1;
public->tMask_cols = public->tMask_rows;
public->tMask_elem = public->tMask_rows * public->tMask_cols;
public->tMask_mem = sizeof(fp) * public->tMask_elem;
for(int i=0; i < public->allPoints; i++) {
private[i].d_tMask = (fp *)malloc(public->tMask_mem);
}
//=====================
// POINT MASK INITIALIZE
//=====================
public->mask_rows = public->maxMove;
public->mask_cols = public->mask_rows;
public->mask_elem = public->mask_rows * public->mask_cols;
public->mask_mem = sizeof(fp) * public->mask_elem;
//=====================
// MASK CONVOLUTION
//=====================
public->mask_conv_rows = public->tMask_rows; // number of rows in I
public->mask_conv_cols = public->tMask_cols; // number of columns in I
public->mask_conv_elem = public->mask_conv_rows * public->mask_conv_cols; // number of elements
public->mask_conv_mem = sizeof(fp) * public->mask_conv_elem;
public->mask_conv_ioffset = (public->mask_rows-1)/2;
if((public->mask_rows-1) % 2 != 0) {
public->mask_conv_ioffset = public->mask_conv_ioffset + 1;
}
public->mask_conv_joffset = (public->mask_cols-1)/2;
if((public->mask_cols-1) % 2 != 0) {
public->mask_conv_joffset = public->mask_conv_joffset + 1;
}
for(int i=0; i < public->allPoints; i++) {
private[i].d_mask_conv = (fp *)malloc(public->mask_conv_mem);
}
}
static void cleanup(public_struct *public, private_struct *private) {
//====================================================================================================
// POINTERS
//====================================================================================================
for(int i=0; i < public->allPoints; i++) {
free(private[i].in_partial_sum);
free(private[i].in_sqr_partial_sum);
free(private[i].par_max_val);
free(private[i].par_max_coo);
free(private[i].d_in2);
free(private[i].d_in2_sqr);
free(private[i].d_in_mod);
free(private[i].d_in_sqr);
free(private[i].d_conv);
free(private[i].d_in2_pad);
free(private[i].d_in2_sub);
free(private[i].d_in2_sub2_sqr);
free(private[i].d_tMask);
free(private[i].d_mask_conv);
}
//====================================================================================================
// COMMON
//====================================================================================================
free(public->d_endoRow);
free(public->d_endoCol);
free(public->d_tEndoRowLoc);
free(public->d_tEndoColLoc);
free(public->d_endoT);
free(public->d_epiRow);
free(public->d_epiCol);
free(public->d_tEpiRowLoc);
free(public->d_tEpiColLoc);
free(public->d_epiT);
}
//==============================================================================
//==============================================================================
// MAIN FUNCTION
//==============================================================================
//==============================================================================
int main(int argc, char *argv []) {
//=====================
// VARIABLES
//=====================
// counters
int i;
int frames_processed;
// parameters
public_struct public;
private_struct private[ALL_POINTS];
//=====================
// FRAMES
//=====================
if(argc!=4) {
printf("ERROR: usage: heartwall <inputfile> <num of frames> <num of threads>\n");
exit(1);
}
char* video_file_name;
video_file_name = argv[1];
avi_t* d_frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (d_frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
public.d_frames = d_frames;
public.frames = AVI_video_frames(public.d_frames);
public.frame_rows = AVI_video_height(public.d_frames);
public.frame_cols = AVI_video_width(public.d_frames);
public.frame_elem = public.frame_rows * public.frame_cols;
public.frame_mem = sizeof(fp) * public.frame_elem;
//=====================
// CHECK INPUT ARGUMENTS
//=====================
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>public.frames) {
printf("ERROR: %d is an incorrect number of frames specified\n.", frames_processed);
printf("Select in the range of 0-%d\n", public.frames);
return 0;
}
int omp_num_threads;
omp_num_threads = atoi(argv[3]);
if (omp_num_threads <=0) {
printf ("num of threads must be a positive integer");
return 0;
}
printf("num of threads: %d\n", omp_num_threads);
//=====================
// INPUTS
//=====================
init_public_and_private_struct(&public, private);
//=====================
// PRINT FRAME PROGRESS START
//=====================
printf("frame progress: ");
fflush(NULL);
//=====================
// KERNEL
//=====================
for(public.frame_no=0; public.frame_no<frames_processed; public.frame_no++) {
//====================================================================================================
// GETTING FRAME
//====================================================================================================
// Extract a cropped version of the first frame from the video file
public.d_frame = get_frame(public.d_frames, // pointer to video file
public.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
//====================================================================================================
// PROCESSING
//====================================================================================================
omp_set_num_threads(omp_num_threads);
#pragma omp parallel for
for(i=0; i<public.allPoints; i++) {
kernel(public, private[i]);
}
//====================================================================================================
// FREE MEMORY FOR FRAME
//====================================================================================================
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(public.d_frame);
//====================================================================================================
// PRINT FRAME PROGRESS
//====================================================================================================
printf("%d ", public.frame_no);
fflush(NULL);
}
//=====================
// PRINT FRAME PROGRESS END
//=====================
printf("\n");
fflush(NULL);
//=====================
// DEALLOCATION
//=====================
//==================================================50
// DUMP DATA TO FILE
//==================================================50
#ifdef OUTPUT
write_data( "result.txt",
public.frames,
frames_processed,
public.endoPoints,
public.d_tEndoRowLoc,
public.d_tEndoColLoc,
public.epiPoints,
public.d_tEpiRowLoc,
public.d_tEpiColLoc);
#endif
cleanup(&public, private);
return 0;
}
//=======================================================================
//=======================================================================
// END OF FILE
//=======================================================================
//=======================================================================
|
target_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd foo
void test_no_clause() {
int i;
#pragma omp target parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target parallel for simd' must be a for loop}}
#pragma omp target parallel for simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
#pragma omp target parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target parallel for simd' directive may not be firstprivate, predetermined as lastprivate}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
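// A conforming combination for reference (illustrative addition): no
// diagnostics are expected here, since the 'simdlen' value does not
// exceed the 'safelen' value.
void test_safelen_simdlen_ok() {
  int i;
#pragma omp target parallel for simd safelen(8) simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
}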
|
cposv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zposv.c, normal z -> c, Fri Sep 28 17:38:09 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_posv
*
* Computes the solution to a system of linear equations A * X = B,
* where A is an n-by-n Hermitian positive definite matrix and X and B are
* n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
*
* \f[ A = L\times L^H, \f] if uplo = PlasmaLower,
* or
* \f[ A = U^H\times U, \f] if uplo = PlasmaUpper,
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
* The factored form of A is then used to solve the system of equations:
*
* A * X = B.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns
* of the matrix B. nrhs >= 0.
*
* @param[in,out] pA
* On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
 *     If uplo = PlasmaLower, the leading n-by-n lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from
* the Cholesky factorization A = U^H*U or A = L*L^H.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_omp_cposv
* @sa plasma_cposv
* @sa plasma_dposv
* @sa plasma_sposv
*
******************************************************************************/
int plasma_cposv(plasma_enum_t uplo,
int n, int nrhs,
plasma_complex32_t *pA, int lda,
plasma_complex32_t *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -3;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -5;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
// quick return
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_potrf(plasma, PlasmaComplexFloat, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_triangular_create(PlasmaComplexFloat, uplo, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_ctr2desc(pA, lda, A, &sequence, &request);
plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
// Call the tile async function.
plasma_omp_cposv(uplo, A, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_cdesc2tr(A, pA, lda, &sequence, &request);
plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
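/******************************************************************************
 * Usage sketch (illustrative, not part of the library). Assumes the PLASMA
 * runtime entry points plasma_init()/plasma_finalize() and a caller-prepared
 * Hermitian positive definite matrix A:
 *
 *     plasma_init();
 *     int info = plasma_cposv(PlasmaLower, n, nrhs, A, lda, B, ldb);
 *     plasma_finalize();
 *     // info == PlasmaSuccess on success; info > 0 flags a non-positive
 *     // definite leading minor, info < 0 an illegal argument
 ******************************************************************************/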
/***************************************************************************//**
*
* @ingroup plasma_posv
*
* Solves a Hermitian positive definite system of linear equations
* using Cholesky factorization.
* Non-blocking tile version of plasma_cposv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in,out] A
* On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
 *     If uplo = PlasmaLower, the leading n-by-n lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from
* the Cholesky factorization A = U^H*U or A = L*L^H.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cposv
* @sa plasma_omp_cposv
* @sa plasma_omp_dposv
* @sa plasma_omp_sposv
*
******************************************************************************/
void plasma_omp_cposv(plasma_enum_t uplo, plasma_desc_t A, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
return;
}
// Check input arguments.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
// quick return
if (A.n == 0 || B.n == 0)
return;
// Call the parallel functions.
plasma_pcpotrf(uplo, A, sequence, request);
plasma_enum_t trans;
trans = uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans;
plasma_pctrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit,
1.0, A,
B,
sequence, request);
trans = uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans;
plasma_pctrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit,
1.0, A,
B,
sequence, request);
}
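/******************************************************************************
 * Note (illustrative): the solve above proceeds in three steps. For
 * uplo = PlasmaUpper,
 *     A = U^H * U,  then  U^H * Y = B  and  U * X = Y,
 * and for uplo = PlasmaLower,
 *     A = L * L^H,  then  L * Y = B  and  L^H * X = Y,
 * which is exactly what plasma_pcpotrf followed by the two plasma_pctrsm
 * calls implement.
 ******************************************************************************/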
|
autocoder.c | /*
libdeep - a library for deep learning
Copyright (C) 2013-2017 Bob Mottram <bob@freedombone.net>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE HOLDERS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "autocoder.h"
/**
* @brief Initialise an autocoder
* @param autocoder Autocoder object
* @param no_of_inputs The number of inputs
* @param no_of_hiddens The number of hidden (encoder) units
* @param random_seed Random number generator seed
* @return zero on success
*/
int autocoder_init(ac * autocoder,
int no_of_inputs,
int no_of_hiddens,
unsigned int random_seed)
{
autocoder->no_of_inputs = no_of_inputs;
autocoder->no_of_hiddens = no_of_hiddens;
FLOATALLOC(autocoder->inputs, no_of_inputs);
if (!autocoder->inputs)
return -1;
FLOATALLOC(autocoder->hiddens, no_of_hiddens);
if (!autocoder->hiddens) {
free(autocoder->inputs);
return -2;
}
FLOATALLOC(autocoder->bias, no_of_hiddens);
if (!autocoder->bias) {
free(autocoder->hiddens);
free(autocoder->inputs);
return -3;
}
FLOATALLOC(autocoder->weights, no_of_hiddens*no_of_inputs);
if (!autocoder->weights) {
free(autocoder->bias);
free(autocoder->hiddens);
free(autocoder->inputs);
return -4;
}
FLOATALLOC(autocoder->last_weight_change, no_of_hiddens*no_of_inputs);
if (!autocoder->last_weight_change) {
free(autocoder->weights);
free(autocoder->bias);
free(autocoder->hiddens);
free(autocoder->inputs);
return -5;
}
FLOATALLOC(autocoder->outputs, no_of_inputs);
if (!autocoder->outputs) {
free(autocoder->last_weight_change);
free(autocoder->weights);
free(autocoder->bias);
free(autocoder->hiddens);
free(autocoder->inputs);
return -6;
}
FLOATALLOC(autocoder->bperr, no_of_hiddens);
if (!autocoder->bperr) {
free(autocoder->outputs);
free(autocoder->last_weight_change);
free(autocoder->weights);
free(autocoder->bias);
free(autocoder->hiddens);
free(autocoder->inputs);
return -7;
}
FLOATALLOC(autocoder->last_bias_change, no_of_hiddens);
if (!autocoder->last_bias_change) {
free(autocoder->bperr);
free(autocoder->outputs);
free(autocoder->last_weight_change);
free(autocoder->weights);
free(autocoder->bias);
free(autocoder->hiddens);
free(autocoder->inputs);
return -8;
}
FLOATCLEAR(autocoder->inputs, no_of_inputs);
FLOATCLEAR(autocoder->outputs, no_of_inputs);
FLOATCLEAR(autocoder->hiddens, no_of_hiddens);
FLOATCLEAR(autocoder->last_weight_change, no_of_hiddens*no_of_inputs);
FLOATCLEAR(autocoder->bperr, no_of_hiddens);
FLOATCLEAR(autocoder->last_bias_change, no_of_hiddens);
autocoder->backprop_error = AUTOCODER_UNKNOWN;
autocoder->backprop_error_average = AUTOCODER_UNKNOWN;
autocoder->learning_rate = 0.2f;
autocoder->noise = 0;
autocoder->random_seed = random_seed;
autocoder->itterations = 0;
autocoder->dropout_percent = 0.01f;
/* initial small random values */
COUNTDOWN(h, no_of_hiddens) {
autocoder->bias[h] =
rand_initial_weight(&autocoder->random_seed, 2);
COUNTDOWN(i, no_of_inputs)
autocoder->weights[h*no_of_inputs + i] =
rand_initial_weight(&autocoder->random_seed, no_of_inputs);
}
return 0;
}
/**
* @brief frees memory for an autocoder
* @param autocoder Autocoder object
*/
void autocoder_free(ac * autocoder)
{
free(autocoder->inputs);
free(autocoder->outputs);
free(autocoder->hiddens);
free(autocoder->bias);
free(autocoder->weights);
free(autocoder->last_weight_change);
free(autocoder->bperr);
free(autocoder->last_bias_change);
}
/**
* @brief Encodes the inputs to a given array
* @param autocoder Autocoder object
* @param encoded Array to store the encoded values
* @param use_dropouts If non-zero then allow dropouts in the returned results
*/
void autocoder_encode(ac * autocoder, float encoded[],
unsigned char use_dropouts)
{
const unsigned int drop_percent =
(unsigned int)(autocoder->dropout_percent*100);
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
COUNTDOWN(h, autocoder->no_of_hiddens) {
unsigned int randseed = (unsigned int)h + autocoder->random_seed;
if (use_dropouts != 0) {
if (rand_num(&randseed)%10000 < drop_percent) {
                /* mark the unit in the returned array as dropped out */
                encoded[h] = AUTOCODER_DROPPED_OUT;
continue;
}
}
/* weighted sum of inputs */
float adder = autocoder->bias[h];
float * w = &autocoder->weights[h*autocoder->no_of_inputs];
float * inp = &autocoder->inputs[0];
if (use_dropouts == 0) {
COUNTDOWN(i, autocoder->no_of_inputs) {
adder += w[i] * inp[i];
}
}
else {
COUNTDOWN(i, autocoder->no_of_inputs) {
if (rand_num(&randseed)%10000 > drop_percent) {
adder += w[i] * inp[i];
}
}
}
/* add some random noise */
if (autocoder->noise > 0) {
adder = ((1.0f - autocoder->noise) * adder) +
(autocoder->noise *
((rand_num(&randseed)%10000)/10000.0f));
}
/* activation function */
encoded[h] = AF(adder);
}
rand_num(&autocoder->random_seed);
}
/**
* @brief Decodes the encoded (hidden) units to a given output array
* @param autocoder Autocoder object
* @param decoded Array to store the decoded output values
* @param use_dropouts If non-zero then allow dropouts in the returned results
*/
void autocoder_decode(ac * autocoder, float decoded[],
unsigned char use_dropouts)
{
const unsigned int drop_percent =
(unsigned int)(autocoder->dropout_percent*100);
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
COUNTDOWN(i, autocoder->no_of_inputs) {
unsigned int randseed = (unsigned int)i + autocoder->random_seed;
/* weighted sum of hidden inputs */
float adder = 0;
float * w = &autocoder->weights[i];
float * inp = &autocoder->hiddens[0];
int step = autocoder->no_of_inputs;
if (use_dropouts == 0) {
COUNTDOWN(h, autocoder->no_of_hiddens) {
adder += w[h*step] * inp[h];
}
}
else {
COUNTDOWN(h, autocoder->no_of_hiddens) {
if (inp[h] != AUTOCODER_DROPPED_OUT) {
if (rand_num(&randseed)%10000 > drop_percent) {
adder += w[h*step] * inp[h];
}
}
}
}
/* add some random noise */
if (autocoder->noise > 0) {
adder = ((1.0f - autocoder->noise) * adder) +
(autocoder->noise *
((rand_num(&randseed)%10000)/10000.0f));
}
/* activation function */
decoded[i] = AF(adder);
}
rand_num(&autocoder->random_seed);
}
/**
* @brief Feed forward
* @param autocoder Autocoder object
*/
void autocoder_feed_forward(ac * autocoder)
{
autocoder_encode(autocoder, autocoder->hiddens, 1);
autocoder_decode(autocoder, autocoder->outputs, 1);
}
/**
 * @brief Back propagate the error
* @param autocoder Autocoder object
*/
void autocoder_backprop(ac * autocoder)
{
    /* clear the backprop error for each hidden unit */
FLOATCLEAR(autocoder->bperr, autocoder->no_of_hiddens);
/* backprop from outputs to hiddens */
    autocoder->backprop_error = 0;
    float error_percent = 0;
    /* reduce the shared error accumulator and update bperr[] atomically;
       otherwise the parallel loop races on both */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS) reduction(+:error_percent)
    COUNTDOWN(i, autocoder->no_of_inputs) {
        float backprop_error = autocoder->inputs[i] - autocoder->outputs[i];
        error_percent += (float)fabs(backprop_error);
        float afact = autocoder->outputs[i] * (1.0f - autocoder->outputs[i]);
        float bperr = backprop_error * afact;
        float * w = &autocoder->weights[i];
        int step = autocoder->no_of_inputs;
        COUNTDOWN(h, autocoder->no_of_hiddens) {
            if (autocoder->hiddens[h] != AUTOCODER_DROPPED_OUT) {
#pragma omp atomic
                autocoder->bperr[h] += bperr * w[h*step];
            }
        }
    }
    /* both accumulators sum the same absolute errors */
    autocoder->backprop_error = error_percent;
/* convert summed error to an overall percentage */
error_percent = error_percent * 100 /
(NEURON_RANGE*autocoder->no_of_inputs);
/* update the running average */
if (autocoder->backprop_error_average == AUTOCODER_UNKNOWN) {
autocoder->backprop_error_average = autocoder->backprop_error;
autocoder->backprop_error_percent = error_percent;
}
else {
autocoder->backprop_error_average =
(autocoder->backprop_error_average*0.999f) +
(autocoder->backprop_error*0.001f);
autocoder->backprop_error_percent =
(autocoder->backprop_error_percent*0.999f) +
(error_percent*0.001f);
}
    /* increment the number of training iterations */
if (autocoder->itterations < UINT_MAX)
autocoder->itterations++;
}
/**
* @brief Adjusts weights and biases
* @param autocoder Autocoder object
*/
void autocoder_learn(ac * autocoder)
{
/* weights between outputs and hiddens */
float e = autocoder->learning_rate / (1.0f + autocoder->no_of_hiddens);
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
COUNTDOWN(i, autocoder->no_of_inputs) {
float afact = autocoder->outputs[i] * (1.0f - autocoder->outputs[i]);
float backprop_error = autocoder->inputs[i] - autocoder->outputs[i];
float gradient = afact * backprop_error;
float egradient = e * gradient;
int step = autocoder->no_of_inputs;
int n = (autocoder->no_of_hiddens-1)*step + i;
COUNTDOWN(h, autocoder->no_of_hiddens) {
if (autocoder->hiddens[h] != AUTOCODER_DROPPED_OUT) {
autocoder->last_weight_change[n] =
egradient * (autocoder->last_weight_change[n] + 1) *
autocoder->hiddens[h];
autocoder->weights[n] =
CLIP_WEIGHT(autocoder->weights[n] +
autocoder->last_weight_change[n]);
}
n -= step;
}
}
/* weights between hiddens and inputs */
e = autocoder->learning_rate / (1.0f + autocoder->no_of_inputs);
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
COUNTDOWN(h, autocoder->no_of_hiddens) {
if (autocoder->hiddens[h] == AUTOCODER_DROPPED_OUT)
continue;
float afact = autocoder->hiddens[h] * (1.0f - autocoder->hiddens[h]);
float backprop_error = autocoder->bperr[h];
float gradient = afact * backprop_error;
float egradient = e * gradient;
autocoder->last_bias_change[h] =
e * (autocoder->last_bias_change[h] + 1.0f) * gradient;
autocoder->bias[h] =
CLIP_WEIGHT(autocoder->bias[h] +
autocoder->last_bias_change[h]);
int n = (h+1)*autocoder->no_of_inputs - 1;
COUNTDOWN(i, autocoder->no_of_inputs) {
autocoder->last_weight_change[n] =
egradient * (autocoder->last_weight_change[n] + 1) *
autocoder->inputs[i];
autocoder->weights[n] =
CLIP_WEIGHT(autocoder->weights[n] +
autocoder->last_weight_change[n]);
n--;
}
}
}
/**
* @brief Save an autocoder to file
* @param fp Pointer to the file
* @param autocoder Autocoder object
* @return zero on success
*/
int autocoder_save(FILE * fp, ac * autocoder)
{
if (INTWRITE(autocoder->no_of_inputs) == 0)
return -1;
if (INTWRITE(autocoder->no_of_hiddens) == 0)
return -2;
if (UINTWRITE(autocoder->random_seed) == 0)
return -3;
if (FLOATWRITE(autocoder->dropout_percent) == 0)
return -4;
if (FLOATWRITEARRAY(autocoder->weights,
autocoder->no_of_inputs*autocoder->no_of_hiddens) == 0)
return -5;
if (FLOATWRITEARRAY(autocoder->last_weight_change,
autocoder->no_of_inputs*autocoder->no_of_hiddens) == 0)
return -6;
if (FLOATWRITEARRAY(autocoder->bias, autocoder->no_of_hiddens) == 0)
return -7;
if (FLOATWRITEARRAY(autocoder->last_bias_change,
autocoder->no_of_hiddens) == 0)
return -8;
if (FLOATWRITE(autocoder->learning_rate) == 0)
return -9;
if (FLOATWRITE(autocoder->noise) == 0)
return -10;
if (UINTWRITE(autocoder->itterations) == 0)
return -11;
return 0;
}
/**
* @brief Load an autocoder from file
* @param fp Pointer to the file
* @param autocoder Autocoder object
* @param initialise Whether to initialise
* @return zero on success
*/
int autocoder_load(FILE * fp, ac * autocoder, int initialise)
{
int no_of_inputs = 0;
int no_of_hiddens = 0;
unsigned int random_seed = 0;
if (INTREAD(no_of_inputs) == 0)
return -1;
if (INTREAD(no_of_hiddens) == 0)
return -2;
if (UINTREAD(random_seed) == 0)
return -3;
/* create the autocoder */
if (initialise != 0) {
if (autocoder_init(autocoder,
no_of_inputs,
no_of_hiddens,
random_seed) != 0) {
return -4;
}
}
else {
autocoder->no_of_inputs = no_of_inputs;
autocoder->no_of_hiddens = no_of_hiddens;
autocoder->random_seed = random_seed;
}
if (FLOATREAD(autocoder->dropout_percent) == 0)
return -5;
if (FLOATREADARRAY(autocoder->weights,
no_of_inputs*no_of_hiddens) == 0)
return -6;
if (FLOATREADARRAY(autocoder->last_weight_change,
no_of_inputs*no_of_hiddens) == 0)
return -7;
if (FLOATREADARRAY(autocoder->bias, no_of_hiddens) == 0)
return -8;
if (FLOATREADARRAY(autocoder->last_bias_change, no_of_hiddens) == 0)
return -9;
if (FLOATREAD(autocoder->learning_rate) == 0)
return -10;
if (FLOATREAD(autocoder->noise) == 0)
return -11;
if (UINTREAD(autocoder->itterations) == 0)
return -12;
return 0;
}
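/* Round-trip sketch (illustrative; the file name and error handling are
   assumptions, not part of the library):

       FILE * fp = fopen("net.dat", "wb");
       autocoder_save(fp, &net);
       fclose(fp);
       fp = fopen("net.dat", "rb");
       autocoder_load(fp, &restored, 1);
       fclose(fp);
       assert(autocoder_compare(&net, &restored) == 0);
*/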
/**
* @brief Sets the input of an autocoder
* @param autocoder Autocoder object
* @param index Array index of the input
* @param value The value to set the input to
*/
void autocoder_set_input(ac * autocoder, int index, float value)
{
autocoder->inputs[index] = value;
}
/**
* @brief Sets autocoder inputs from an array
* @param autocoder Autocoder object
* @param inputs Array containing input values
*/
void autocoder_set_inputs(ac * autocoder, float inputs[])
{
memcpy((void*)autocoder->inputs, inputs,
autocoder->no_of_inputs*sizeof(float));
}
/**
* @brief Returns the value of a hidden unit
* @param autocoder Autocoder object
* @param index Array index of the hidden (encoder) unit
* @return Value of the hidden (encoder) unit
*/
float autocoder_get_hidden(ac * autocoder, int index)
{
return autocoder->hiddens[index];
}
/**
* @brief Sets the value of a hidden unit
* @param autocoder Autocoder object
* @param index Array index of the hidden (encoder) unit
* @param value Value to set as
*/
void autocoder_set_hidden(ac * autocoder, int index, float value)
{
autocoder->hiddens[index] = value;
}
/**
* @brief Main update routine for training
* @param autocoder Autocoder object
*/
void autocoder_update(ac * autocoder)
{
autocoder_feed_forward(autocoder);
autocoder_backprop(autocoder);
autocoder_learn(autocoder);
}
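/* Training-loop sketch (illustrative; the layer sizes, epoch count and
   training_sample array are assumptions, not part of the library):

       ac net;
       autocoder_init(&net, 256, 64, 1234);
       for (int epoch = 0; epoch < 100; epoch++) {
           autocoder_set_inputs(&net, training_sample);
           autocoder_update(&net);  // feed forward, backprop, learn
       }
       printf("error %.2f%%\n", net.backprop_error_percent);
       autocoder_free(&net);
*/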
/**
* @brief Normalises the inputs to the autocoder
* @param autocoder Autocoder object
*/
void autocoder_normalise_inputs(ac * autocoder)
{
float min = autocoder->inputs[0];
float max = autocoder->inputs[0];
FOR(i, 1, autocoder->no_of_inputs) {
if (autocoder->inputs[i] < min)
min = autocoder->inputs[i];
if (autocoder->inputs[i] > max)
max = autocoder->inputs[i];
}
float range = max - min;
if (range <= 0) return;
COUNTUP(i, autocoder->no_of_inputs) {
autocoder->inputs[i] =
NEURON_LOW +
(((autocoder->inputs[i] - min)/range)*NEURON_RANGE);
}
}
/**
* @brief Returns zero if two autocoders are the same
* @param autocoder0 The first autocoder
* @param autocoder1 The second autocoder
* @return zero on success
*/
int autocoder_compare(ac * autocoder0, ac * autocoder1)
{
if (autocoder0->no_of_inputs != autocoder1->no_of_inputs)
return -1;
if (autocoder0->no_of_hiddens != autocoder1->no_of_hiddens)
return -2;
COUNTDOWN(h, autocoder0->no_of_hiddens) {
if (autocoder0->bias[h] != autocoder1->bias[h])
return -3;
}
COUNTDOWN(i, autocoder0->no_of_inputs*autocoder0->no_of_hiddens) {
if (autocoder0->weights[i] != autocoder1->weights[i])
return -4;
}
return 0;
}
/**
* @brief Plots weight values within an image
* @param autocoder Autocoder object
* @param feature_index Index number of the hidden (encoder) unit
* @param patch_radius Radius of the patch in the input layer of a
* convolution system
 * @param patch_depth Depth of the input layer of a convolution system
* @param img_tx Top x coordinate for where to draw the weights
* @param img_ty Top y coordinate for where to draw the weights
* @param img_bx Bottom x coordinate for where to draw the weights
* @param img_by Bottom y coordinate for where to draw the weights
* @param img Image array (3 bytes per pixel)
* @param img_width Width of the image
* @param img_height Height of the image
* @return zero on success
*/
int autocoder_plot_weights(ac * autocoder,
int feature_index,
int patch_radius, int patch_depth,
int img_tx, int img_ty, int img_bx, int img_by,
unsigned char img[],
int img_width, int img_height)
{
int img_y_range = img_by - img_ty;
int img_x_range = img_bx - img_tx;
int patch_width = patch_radius*2;
int no_of_weights = patch_width*patch_width*patch_depth;
/* check that the number of inputs matches the expected patch size */
if (autocoder->no_of_inputs != no_of_weights)
return -1;
if ((img_x_range == 0) || (img_y_range == 0))
return -2;
float min_weight = autocoder->weights[0];
float max_weight = min_weight;
int start_index = feature_index*no_of_weights;
FOR(i, start_index, start_index + no_of_weights) {
if (autocoder->weights[i] < min_weight)
min_weight = autocoder->weights[i];
if (autocoder->weights[i] > max_weight)
max_weight = autocoder->weights[i];
}
float weight_range = max_weight - min_weight;
if (weight_range <= 0.0f)
return -3;
/* for every pixel in the output image */
FOR(y, img_ty, img_by) {
int patch_y = (y - img_ty) * patch_width / img_y_range;
FOR(x, img_tx, img_bx) {
int patch_x = (x - img_tx) * patch_width / img_x_range;
/* position in the image */
int img_n = (y*img_width + x)*3;
/* position in the patch */
int patch_n = (patch_y*patch_width + patch_x)*patch_depth;
COUNTDOWN(c, 3) {
float w = autocoder->weights[start_index + patch_n +
(c*patch_depth/3)];
img[img_n + c] =
(unsigned char)((w-min_weight)*255/weight_range);
}
}
}
return 0;
}
/**
* @brief Plots weight matrices within an image
* @param net Autocoder neural net object
* @param filename Filename of the image to save as
* @param image_width Width of the image in pixels
* @param image_height Height of the image in pixels
*/
int autocoder_plot_weight_matrix(ac * net,
char * filename,
int image_width, int image_height)
{
    float w, min_w = 9999999.0f, max_w = -9999999.0f;
    float min_bias = 9999999.0f, max_bias = -9999999.0f;
    float min_hidden = 9999999.0f, max_hidden = -9999999.0f;
unsigned char * img;
/* allocate memory for the image */
UCHARALLOC(img, image_width*image_height*3);
if (!img)
return -1;
    /* clear the image with a white background (0xFF in every channel;
       the original octal escape '\255' only yields 173) */
    memset((void*)img, 0xFF,
           image_width*image_height*3*sizeof(unsigned char));
/* get the weight range */
COUNTDOWN(h, net->no_of_hiddens) {
COUNTDOWN(i, net->no_of_inputs) {
w = net->weights[h*net->no_of_inputs + i];
if (w < min_w) min_w = w;
if (w > max_w) max_w = w;
}
}
/* get the bias and hidden unit range */
COUNTDOWN(h, net->no_of_hiddens) {
if (net->bias[h] < min_bias) min_bias = net->bias[h];
if (net->bias[h] > max_bias) max_bias = net->bias[h];
if (net->hiddens[h] < min_hidden) min_hidden = net->hiddens[h];
if (net->hiddens[h] > max_hidden) max_hidden = net->hiddens[h];
}
if (max_bias > min_bias) {
COUNTDOWN(y, image_height) {
int h = y*net->no_of_hiddens/image_height;
COUNTDOWN(x, image_width) {
int i = x*net->no_of_inputs/image_width;
int n = (y*image_width + x)*3;
w = net->weights[h*net->no_of_inputs + i];
img[n] = (unsigned char)((w - min_w)*255/(max_w - min_w));
img[n+1] =
(unsigned char)((net->bias[h]-min_bias)*255/
(max_bias - min_bias));
if (max_hidden > min_hidden)
img[n+2] =
(unsigned char)((net->hiddens[h]-min_hidden)*255/
(max_hidden - min_hidden));
else
img[n+2] = (unsigned char)255;
}
}
}
/* write the image to file */
deeplearn_write_png_file(filename,
(unsigned int)image_width,
(unsigned int)image_height,
24, img);
/* free the image memory */
free(img);
return 0;
}
|
concattest4.c | #include <stdlib.h>
#include "concattest4.h"
/* Copies the m-by-n column-major matrix 'l' into 'output' (also m-by-n,
   column-major). The generated code writes row 0 and rows 1..m-1 in two
   separate parallel loops, an artifact of concatenation-style code
   generation; the combined effect is an element-wise copy. */
void concattest4(float* l,int m,int n,float*output){
#pragma omp parallel for
for (int H10 = 0; H10 < 1; H10++) {
for (int H11 = 0; H11 < n; H11++) {
output[((1 + (m - (1)))) * (H11) + H10] = l[(H11) * (m) + H10];
}
}
#pragma omp parallel for
for (int H12 = 1; H12 < m; H12++) {
for (int H13 = 0; H13 < n; H13++) {
output[((1 + (m - (1)))) * (H13) + ((H12 - (1)) + 1)] = l[(H13) * (m) + H12];
}
}
}
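/* Usage sketch (illustrative; the literal values are assumptions):

       float in[6] = {1, 2, 3, 4, 5, 6};   // 3-by-2, column-major
       float out[6];
       concattest4(in, 3, 2, out);         // out now equals in
*/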
|
omp_multiplicacao.c | /******************************************************************************
* FILE: mm.c
* DESCRIPTION:
* Matrix Multiply - C Version
* Modified from Blaise Barney OpenMP code.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#define NRA 2048 /* number of rows in matrix A */
#define NCA 2048 /* number of columns in matrix A */
#define NCB 2048 /* number of columns in matrix B */
//gcc omp_multiplicacao.c -fopenmp -o ompmultiplicacao
//for i in `seq 1 10`; do ./ompmultiplicacao; done
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec + t.tv_usec / 1000000.0;
}
int main (int argc, char *argv[])
{
int i, j, k;
double start_time, end_time;
// double a[NRA][NCA], /* matrix A to be multiplied */
// b[NCA][NCB], /* matrix B to be multiplied */
// c[NRA][NCB]; /* result matrix C */
double **a, **b, **c;
a = malloc(NRA*sizeof(double*));
for(i=0;i<NRA;i++){
a[i] = malloc(NCA*sizeof(double));
}
b = malloc(NCA*sizeof(double*));
for(i=0;i<NCA;i++){
b[i] = malloc(NCB*sizeof(double));
}
c = malloc(NRA*sizeof(double*));
for(i=0;i<NRA;i++){
c[i] = malloc(NCB*sizeof(double));
}
#pragma omp parallel private(i, j, k) shared(a, b, c)
{
/*** Initialize matrices ***/
#pragma omp for schedule(dynamic) nowait
for (i=0; i<NRA; i++)
for (j=0; j<NCA; j++)
a[i][j]= i+j;
#pragma omp for schedule(dynamic) nowait
for (i=0; i<NCA; i++)
for (j=0; j<NCB; j++)
b[i][j]= i*j;
#pragma omp for schedule(dynamic) nowait
for (i=0; i<NRA; i++)
for (j=0; j<NCB; j++)
c[i][j]= 0;
}
start_time = wtime();
#pragma omp parallel private(i, j, k) shared(a, b, c)
{
/*** Do matrix multiply ***/
#pragma omp for schedule(dynamic) nowait
for (i=0; i<NRA; i++)
for(j=0; j<NCB; j++)
for (k=0; k<NCA; k++)
c[i][j] += a[i][k] * b[k][j];
}
end_time = wtime();
/*** Print results ***/
/*
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<NRA; i++)
{
for (j=0; j<NCB; j++)
printf("%6.2f ", c[i][j]);
printf("\n");
}
printf("******************************************************\n");
*/
printf ("%f\n", end_time - start_time);
}
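/* Optional serial spot-check (illustrative sketch; O(n^3), so only
   practical for small NRA/NCA/NCB, and it needs <math.h> for fabs):

       double ref, max_err = 0;
       for (i = 0; i < NRA; i++)
           for (j = 0; j < NCB; j++) {
               ref = 0;
               for (k = 0; k < NCA; k++)
                   ref += a[i][k] * b[k][j];
               if (fabs(c[i][j] - ref) > max_err)
                   max_err = fabs(c[i][j] - ref);
           }
*/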
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// \brief Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
llvm::alignOf<OMPClause *>())) {}
/// \brief Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
  /// \param S Associated statement.
///
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only those declarations that meet some run-time
/// criteria.
template <class FilterPredicate> class filtered_clause_iterator {
protected:
ArrayRef<OMPClause *>::const_iterator Current;
ArrayRef<OMPClause *>::const_iterator End;
FilterPredicate Pred;
void SkipToNextClause() {
while (Current != End && !Pred(*Current))
++Current;
}
public:
typedef const OMPClause *value_type;
filtered_clause_iterator() : Current(), End() {}
filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
: Current(Arr.begin()), End(Arr.end()), Pred(std::move(Pred)) {
SkipToNextClause();
}
value_type operator*() const { return *Current; }
value_type operator->() const { return *Current; }
filtered_clause_iterator &operator++() {
++Current;
SkipToNextClause();
return *this;
}
filtered_clause_iterator operator++(int) {
filtered_clause_iterator tmp(*this);
++(*this);
return tmp;
}
bool operator!() { return Current == End; }
explicit operator bool() { return Current != End; }
bool empty() const { return Current == End; }
};
template <typename Fn>
filtered_clause_iterator<Fn> getFilteredClauses(Fn &&fn) const {
return filtered_clause_iterator<Fn>(clauses(), std::move(fn));
}
struct ClauseKindFilter {
OpenMPClauseKind Kind;
bool operator()(const OMPClause *clause) const {
return clause->getClauseKind() == Kind;
}
};
filtered_clause_iterator<ClauseKindFilter>
getClausesOfKind(OpenMPClauseKind Kind) const {
return getFilteredClauses(ClauseKindFilter{Kind});
}
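  // Example (illustrative sketch): visit every 'private' clause of a
  // directive D (an OMPExecutableDirective *); 'visit' stands in for
  // user code:
  //   for (auto I = D->getClausesOfKind(OMPC_private); I; ++I)
  //     visit(*I); // '*I' is a 'const OMPClause *'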
  /// \brief Gets the single clause of the specified kind \a K associated with
  /// the current directive iff there is only one clause of this kind (an
  /// assertion fires if more than one clause of this kind is associated with
  /// the directive). Returns nullptr if no clause of kind \a K is associated
  /// with the directive.
const OMPClause *getSingleClause(OpenMPClauseKind K) const;
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive.
Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return const_cast<Stmt *>(*child_begin());
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
if (!hasAssociatedStmt())
return child_range();
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
return child_range(ChildStorage, ChildStorage + NumChildren);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives, and
/// the next 7 are specific to the worksharing ones.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 8,
// The following 7 exprs are used by worksharing loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for worksharing loop directives.
WorksharingEnd = 15,
};
/// \brief Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd
: DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
3 * CollapsedNum; // Counters, Updates and Finals
}
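  // For example (illustrative): a worksharing '#pragma omp for collapse(2)'
  // stores numLoopChildren(2, OMPD_for) == WorksharingEnd + 3 * 2 == 21
  // child expressions.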
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setIsLastIterVariable(Expr *IL) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setCounters(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// \brief Loop iteration variable.
Expr *IterationVarRef;
/// \brief Loop last iteration number.
Expr *LastIteration;
/// \brief Loop number of iterations.
Expr *NumIterations;
/// \brief Calculation of last iteration.
Expr *CalcLastIteration;
/// \brief Loop pre-condition.
Expr *PreCond;
/// \brief Loop condition.
Expr *Cond;
/// \brief Loop iteration variable init.
Expr *Init;
/// \brief Loop increment.
Expr *Inc;
/// \brief IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// \brief LowerBound - local variable passed to runtime.
Expr *LB;
/// \brief UpperBound - local variable passed to runtime.
Expr *UB;
/// \brief Stride - local variable passed to runtime.
Expr *ST;
    /// \brief EnsureUpperBound -- expression UB = min(UB, NumIterations).
Expr *EUB;
    /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// \brief Loop counters.
SmallVector<Expr *, 4> Counters;
/// \brief Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
    /// \brief Final loop counter values for CodeGen.
SmallVector<Expr *, 4> Finals;
/// \brief Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// \brief Initialize all the fields to null.
/// \param Size Number of elements in the counters/finals/updates arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
Counters.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
}
}
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
Expr *getIterationVariable() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PreConditionOffset)));
}
Expr *getCond() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
}
Expr *getInit() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
Expr *getIsLastIterVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextUpperBoundOffset)));
}
const Stmt *getBody() const {
    // This relies on the loop form having already been checked by Sema.
Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
Body = cast<ForStmt>(Body)->getBody();
for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
Body = Body->IgnoreContainers();
Body = cast<ForStmt>(Body)->getBody();
}
return Body;
}
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with space
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPSectionDirective()
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSingleDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPMasterDirective()
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Name of the directive.
DeclarationNameInfo DirName;
/// \brief Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
StartLoc, EndLoc, 0, 1),
DirName(Name) {}
/// \brief Build an empty directive.
///
explicit OMPCriticalDirective()
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
SourceLocation(), SourceLocation(), 0, 1),
DirName() {}
/// \brief Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with space
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with space
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, StartLoc, EndLoc,
NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, SourceLocation(),
SourceLocation(), NumClauses, 1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTaskDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPTaskgroupDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has two arguments: variables
/// 'a' and 'b'.
/// The 'omp flush' directive does not have clauses, but it may carry an
/// optional list of variables to flush; this list is stored within a
/// synthetic FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses (only a single OMPFlushClause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPOrderedDirective()
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPOrderedDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-commutative operations (like
/// << or >>).
bool IsXLHSInRHSPart;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// v = x; <update x>;
/// <update x>; v = x;
/// \endcode
/// This field is true for the first (postfix) form of the expression and false
/// otherwise.
bool IsPostfixUpdate;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
StartLoc, EndLoc, NumClauses, 5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPAtomicDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
SourceLocation(), SourceLocation(), NumClauses,
5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
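// Child layout implied by the accessors below: child 0 is the associated
// statement, child 1 is 'x', child 2 the update expression, child 3 'v' and
// child 4 'expr' (matching the 5 children requested from the base class).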
/// \brief Set 'x' part of the associated expression/statement.
void setX(Expr *X) { *std::next(child_begin()) = X; }
/// \brief Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
/// \brief Set 'v' part of the associated expression/statement.
void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
/// \brief Set 'expr' part of the associated expression/statement.
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
/// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for a
/// detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
/// \param IsPostfixUpdate true if the original value of 'x' must be stored in
/// 'v', not the updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Get 'x' part of the associated expression/statement.
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
const Expr *getX() const {
return cast_or_null<Expr>(*std::next(child_begin()));
}
/// \brief Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
/// \brief Return true if helper update expression has form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
/// \brief Return true if the 'v' expression must be updated to the original
/// value of 'x', false if 'v' must be updated to the new value of 'x'.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
/// \brief Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
const Expr *getV() const {
return cast_or_null<Expr>(*std::next(child_begin(), 3));
}
/// \brief Get 'expr' part of the associated expression/statement.
Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
const Expr *getExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 4));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPAtomicDirectiveClass;
}
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with space for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
} // end namespace clang
#endif
|
ft_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <assert.h>
#include "config.h"
#include "cint.h"
#include "gto/ft_ao.h"
#include "vhf/fblas.h"
#define INTBUFMAX 16000
#define IMGBLK 80
#define OF_CMPLX 2
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
int PBCsizeof_env(int *shls_slice,
int *atm, int natm, int *bas, int nbas, double *env);
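/* Shift the coordinates of the atom whose x,y,z sit at env[ptr..ptr+2] by
 * the iL-th lattice translation vector; env_loc is a mutable copy of env. */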
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0];
env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1];
env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2];
}
/*
* Multiple k-points
*/
static void _ft_fill_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out, int nkpts,
int comp, int nimgs, int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
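/* Outline: G-vectors are processed in blocks of at most blksize. For each
 * block, integrals over up to IMGBLK lattice images are collected in bufL,
 * then zgemm contracts the image axis with the phase factors expkL to
 * accumulate the nkpts k-point components in bufk. */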
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const char TRANS_N = 'N';
const double complex Z1 = 1;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *bufk = buf;
double complex *bufL = buf + dij*blksize * comp * nkpts;
double complex *pbuf;
int gs0, gs1, dg, dijg;
int jL0, jLcount, jL;
int i;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg*nkpts; i++) {
bufk[i] = 0;
}
for (jL0 = 0; jL0 < nimgs; jL0 += IMGBLK) {
jLcount = MIN(IMGBLK, nimgs-jL0);
pbuf = bufL;
for (jL = jL0; jL < jL0+jLcount; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
if (!(*intor)(pbuf, shls, dims, eval_aopair, eval_gz,
Z1, sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
/* integral block screened out: clear its slot in bufL */
for (i = 0; i < dijg; i++) {
pbuf[i] = 0;
}
}
pbuf += dijg;
}
zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &jLcount,
&Z1, bufL, &dijg, expkL+jL0, &nimgs,
&Z1, bufk, &dijg);
}
(*fsort)(out, bufk, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
* Single k-point
*/
static void _ft_fill_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out, int nkpts,
int comp, int nimgs, int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
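/* Single k-point case: the phase expkL[jL] is passed straight into the
 * integral kernel, so the lattice sum accumulates in bufk without a zgemm. */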
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *bufk = buf;
double complex *bufL = buf + dij*blksize * comp;
int gs0, gs1, dg, jL, i;
size_t dijg;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg; i++) {
bufk[i] = 0;
}
for (jL = 0; jL < nimgs; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
if ((*intor)(bufL, shls, dims, eval_aopair, eval_gz,
expkL[jL], sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
for (i = 0; i < dijg; i++) {
bufk[i] += bufL[i];
}
}
}
(*fsort)(out, bufk, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
* Multiple k-points for BvK cell
*/
static void _ft_bvk_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out,
int nkpts, int comp, int nimgs, int bvk_nimgs, int blksize,
int ish, int jsh, int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
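/* BvK variant: each lattice image is folded onto its Born-von-Karman cell
 * slot via cell_loc_bvk (image pairs masked out by ovlp_mask are skipped);
 * a single zgemm over the bvk_nimgs slots then applies the k-point phases. */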
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int njsh = jsh1 - jsh0;
ovlp_mask += (ish * njsh + jsh) * nimgs;
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const char TRANS_N = 'N';
const double complex Z1 = 1;
const double complex Z0 = 0;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *buf_rs = buf;
double complex *bufL = buf + dij * blksize * comp * nkpts;
double complex *pbuf;
int gs0, gs1, dg, dijg;
int jL, i;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg*bvk_nimgs; i++) {
bufL[i] = 0;
}
for (jL = 0; jL < nimgs; jL++) {
if (!ovlp_mask[jL]) {
continue;
}
shift_bas(env_loc, env, Ls, jptrxyz, jL);
if ((*intor)(buf_rs, shls, dims, eval_aopair, eval_gz,
Z1, sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
pbuf = bufL + dijg * cell_loc_bvk[jL];
for (i = 0; i < dijg; i++) {
pbuf[i] += buf_rs[i];
}
}
}
zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &bvk_nimgs,
&Z1, bufL, &dijg, expkL, &bvk_nimgs, &Z0, buf, &dijg);
(*fsort)(out, buf, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
* Single k-point for BvK cell
*/
static void _ft_bvk_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out,
int nkpts, int comp, int nimgs, int bvk_nimgs, int blksize,
int ish, int jsh, int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
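/* BvK single k-point: every surviving image contributes with the phase of
 * its folded BvK cell, expkL[cell_loc_bvk[jL]]. */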
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int njsh = jsh1 - jsh0;
ovlp_mask += (ish * njsh + jsh) * nimgs;
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex fac;
double complex *buf_rs = buf + dij * blksize * comp;
int gs0, gs1, dg, jL, i;
size_t dijg;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg; i++) {
buf[i] = 0;
}
for (jL = 0; jL < nimgs; jL++) {
if (!ovlp_mask[jL]) {
continue;
}
shift_bas(env_loc, env, Ls, jptrxyz, jL);
fac = expkL[cell_loc_bvk[jL]];
if ((*intor)(buf_rs, shls, dims, eval_aopair, eval_gz,
fac, sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
for (i = 0; i < dijg; i++) {
buf[i] += buf_rs[i];
}
}
}
(*fsort)(out, buf, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
static void sort_s1(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
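/* Scatter one (ish,jsh) block from the packed buffer `in`, laid out as
 * [kpt][comp][j][i][g-block], into the full output `out`, laid out as
 * [kpt][comp][i][j][NGv]. */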
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t nijg = naoi * naoj * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dg = gs1 - gs0;
const size_t dijg = di * dj * dg;
out += (ip * naoj + jp) * NGv + gs0;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
for (j = 0; j < dj; j++) {
for (i = 0; i < di; i++) {
pout = out + (i*naoj+j) * NGv;
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[n] = pin[n];
}
} }
out += nijg;
in += dijg;
} }
}
static void sort_s2_igtj(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
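/* Like sort_s1, but `out` holds the lower triangle of the AO pair matrix in
 * s2 packed order; this routine handles off-diagonal shell blocks (ish > jsh),
 * where every (i,j) element of the block belongs to the triangle. */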
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijg = nij * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dg = gs1 - gs0;
const size_t dijg = dij * dg;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
const int ip1 = ao_loc[ish] + 1;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
pout = out;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[j*NGv+n] = pin[n];
}
}
pout += (ip1 + i) * NGv;
}
out += nijg;
in += dijg;
} }
}
static void sort_s2_ieqj(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
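/* Diagonal shell blocks (ish == jsh) of the s2-packed output: only elements
 * with j <= i lie in the lower triangle and are copied. */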
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijg = nij * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dg = gs1 - gs0;
const size_t dijg = dij * dg;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
const int ip1 = ao_loc[ish] + 1;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
pout = out;
for (i = 0; i < di; i++) {
for (j = 0; j <= i; j++) {
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[j*NGv+n] = pin[n];
}
}
pout += (ip1 + i) * NGv;
}
out += nijg;
in += dijg;
} }
}
void PBC_ft_fill_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_fill_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_fill_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_fill_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip >= jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_fill_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_bvk_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, int blksize, int ish, int jsh,
int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_bvk_k(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_bvk_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, int blksize, int ish, int jsh,
int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_bvk_k(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_bvk_k(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_bvk_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, int blksize, int ish, int jsh,
int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_bvk_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, int blksize, int ish, int jsh,
int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip >= jp) {
_ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_bvk_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, int blksize, int ish, int jsh,
int *cell_loc_bvk, char *ovlp_mask,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
ish, jsh, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
static int subgroupGv(double *sGv, int *sgxyz, double *Gv, int *gxyz,
int nGv, int bufsize, int *shls_slice, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
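/* Repack Gv (shape [3][nGv]) and, when present, gxyz into contiguous blocks
 * of shape [3][dg], sized so one block of integrals fits in bufsize; the
 * block size is rounded down to a multiple of 8 by the 0xfffffff8 mask. */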
int i;
int dimax = 0;
int djmax = 0;
for (i = shls_slice[0]; i < shls_slice[1]; i++) {
dimax = MAX(dimax, ao_loc[i+1]-ao_loc[i]);
}
for (i = shls_slice[2]; i < shls_slice[3]; i++) {
djmax = MAX(djmax, ao_loc[i+1]-ao_loc[i]);
}
int dij = dimax * djmax;
int gblksize = 0xfffffff8 & (bufsize / dij);
int gs0, dg;
for (gs0 = 0; gs0 < nGv; gs0 += gblksize) {
dg = MIN(nGv-gs0, gblksize);
for (i = 0; i < 3; i++) {
memcpy(sGv+dg*i, Gv+nGv*i+gs0, sizeof(double)*dg);
}
sGv += dg * 3;
if (gxyz != NULL) {
for (i = 0; i < 3; i++) {
memcpy(sgxyz+dg*i, gxyz+nGv*i+gs0, sizeof(int)*dg);
}
sgxyz += dg * 3;
}
}
return gblksize;
}
void PBC_ft_latsum_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *out, int nkpts, int comp, int nimgs,
double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
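/* Driver: block the G-vectors once, then distribute (ish,jsh) shell pairs
 * over OpenMP threads; each thread works on a private copy of env so that
 * shift_bas can move atom coordinates without data races. */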
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
double *sGv = malloc(sizeof(double) * nGv * 3);
int *sgxyz = NULL;
if (gxyz != NULL) {
sgxyz = malloc(sizeof(int) * nGv * 3);
}
int blksize;
if (fill == &PBC_ft_fill_nk1s1 || fill == &PBC_ft_fill_nk1s2 ||
fill == &PBC_ft_fill_nk1s1hermi) {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX*IMGBLK/2,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
} else {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
}
int (*eval_aopair)() = NULL;
if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
eval_aopair = &GTO_aopair_lazy_contract;
}
#pragma omp parallel
{
int i, j, ij;
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv);
memcpy(env_loc, env, sizeof(double)*nenv);
size_t count = nkpts + IMGBLK;
double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp);
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz,
out, nkpts, comp, nimgs, blksize, i, j,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(sGv);
if (sgxyz != NULL) {
free(sgxyz);
}
}
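/*
 * The driver above uses a common OpenMP pattern: the (ish, jsh) shell-pair
 * loop is flattened into a single index so that one `omp for
 * schedule(dynamic)` can balance uneven pair costs, while env_loc and buf
 * are allocated per thread to avoid sharing. A minimal sketch of the same
 * pattern, with do_pair() and scratch_size as placeholders:
 *
 *     #pragma omp parallel
 *     {
 *         double *scratch = malloc(scratch_size);
 *         #pragma omp for schedule(dynamic)
 *         for (int ij = 0; ij < ni * nj; ij++) {
 *             do_pair(ij / nj, ij % nj, scratch);
 *         }
 *         free(scratch);
 *     }
 */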
void PBC_ft_bvk_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc,
int *cell_loc_bvk, char *ovlp_mask,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
double *sGv = malloc(sizeof(double) * nGv * 3);
int *sgxyz = NULL;
if (gxyz != NULL) {
sgxyz = malloc(sizeof(int) * nGv * 3);
}
int blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
int (*eval_aopair)() = NULL;
if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
eval_aopair = &GTO_aopair_lazy_contract;
}
#pragma omp parallel
{
int i, j, ij;
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv);
memcpy(env_loc, env, sizeof(double)*nenv);
size_t count = nkpts + bvk_nimgs;
double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp);
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
i, j, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(sGv);
if (sgxyz != NULL) {
free(sgxyz);
}
}
|
test_openmp.c | #include "config.h"
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "kseq.h"
KSEQ_INIT(int, read)
#if HAVE_SSE2
#include "ssw.h"
#endif
#include "parasail.h"
#include "parasail/memory.h"
#include "parasail/stats.h"
//#include "timer.h"
#include "timer_real.h"
#if HAVE_SSE2
parasail_result_t* parasail_ssw_(
const char * const restrict s1, const int s1_len,
const char * const restrict s2, const int s2_len,
const int open, const int gap, const parasail_matrix_t * pmatrix,
int score_size)
{
parasail_result_t *result = parasail_result_new();
s_profile *profile = NULL;
int8_t *s1_num = (int8_t*)malloc(sizeof(int8_t) * s1_len);
int8_t *s2_num = (int8_t*)malloc(sizeof(int8_t) * s2_len);
int8_t *matrix = (int8_t*)malloc(sizeof(int8_t) * 24 * 24);
s_align *ssw_result = NULL;
int m = 0;
/* This table is used to transform amino acid letters into numbers. */
static const int8_t table[128] = {
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23,
14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23,
23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23,
14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23
};
/* initialize score matrix */
for (m = 0; m < s1_len; ++m) s1_num[m] = table[(int)s1[m]];
for (m = 0; m < s2_len; ++m) s2_num[m] = table[(int)s2[m]];
for (m = 0; m < 24*24; ++m) matrix[m] = pmatrix->matrix[m];
profile = ssw_init(s1_num, s1_len, matrix, 24, score_size);
ssw_result = ssw_align(profile, s2_num, s2_len, -open, -gap, 2, 0, 0, s1_len/2);
result->score = ssw_result->score1;
result->saturated = ssw_result->saturated;
align_destroy(ssw_result);
init_destroy(profile);
free(s1_num);
free(s2_num);
free(matrix);
return result;
}
parasail_result_t* parasail_ssw(
const char * const restrict s1, const int s1_len,
const char * const restrict s2, const int s2_len,
const int open, const int gap, const parasail_matrix_t *matrix)
{
return parasail_ssw_(s1, s1_len, s2, s2_len, open, gap, matrix, 2);
}
parasail_result_t* parasail_ssw_16(
const char * const restrict s1, const int s1_len,
const char * const restrict s2, const int s2_len,
const int open, const int gap, const parasail_matrix_t *matrix)
{
return parasail_ssw_(s1, s1_len, s2, s2_len, open, gap, matrix, 1);
}
#endif
parasail_result_t* parasail_sw(
const char * const restrict s1, const int s1Len,
const char * const restrict s2, const int s2Len,
const int open, const int gap, const parasail_matrix_t *matrix)
{
int saturated = 0;
parasail_result_t *result;
result = parasail_sw_scan_8(s1, s1Len, s2, s2Len, open, gap, matrix);
if (result->saturated) {
saturated = 1;
parasail_result_free(result);
result = parasail_sw_scan_16(s1, s1Len, s2, s2Len, open, gap, matrix);
}
if (result->saturated) {
parasail_result_free(result);
result = parasail_sw_scan_32(s1, s1Len, s2, s2Len, open, gap, matrix);
}
result->saturated = saturated;
return result;
}
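/*
 * parasail_sw() above illustrates the usual saturation-escalation pattern
 * for striped SIMD aligners: run the narrowest integer width first and
 * retry wider only when the score overflows. A minimal sketch of the same
 * idea, assuming a hypothetical align_N() family that sets a saturated
 * flag:
 *
 *     r = align_8(...);
 *     if (r.saturated) { free_result(r); r = align_16(...); }
 *     if (r.saturated) { free_result(r); r = align_32(...); }
 */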
static inline void parse_sequences(
const char *filename, char ***strings_, size_t **sizes_, size_t *count_)
{
FILE* fp;
kseq_t *seq = NULL;
int l = 0;
char **strings = NULL;
size_t *sizes = NULL;
size_t count = 0;
size_t memory = 1000;
fp = fopen(filename, "r");
if(fp == NULL) {
perror("fopen");
exit(1);
}
strings = malloc(sizeof(char*) * memory);
sizes = malloc(sizeof(size_t) * memory);
seq = kseq_init(fileno(fp));
while ((l = kseq_read(seq)) >= 0) {
strings[count] = strdup(seq->seq.s);
if (NULL == strings[count]) {
perror("strdup");
exit(1);
}
sizes[count] = seq->seq.l;
++count;
if (count >= memory) {
char **new_strings = NULL;
size_t *new_sizes = NULL;
memory *= 2;
new_strings = realloc(strings, sizeof(char*) * memory);
if (NULL == new_strings) {
perror("realloc");
exit(1);
}
strings = new_strings;
new_sizes = realloc(sizes, sizeof(size_t) * memory);
if (NULL == new_sizes) {
perror("realloc");
exit(1);
}
sizes = new_sizes;
}
}
kseq_destroy(seq);
fclose(fp);
*strings_ = strings;
*sizes_ = sizes;
*count_ = count;
}
static inline unsigned long binomial_coefficient(unsigned long n, unsigned long k)
{
/* from http://blog.plover.com/math/choose.html */
unsigned long r = 1;
unsigned long d;
if (k > n) {
return 0;
}
for (d = 1; d <= k; d++) {
r *= n--;
r /= d;
}
return r;
}
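/*
 * Worked example: binomial_coefficient(5, 2) computes
 *     d = 1: r = 1 * 5 / 1 = 5    (n becomes 4)
 *     d = 2: r = 5 * 4 / 2 = 10
 * The multiply-then-divide order keeps r integral at every step, since
 * the product of d consecutive integers is divisible by d!.
 */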
static inline void k_combination2(unsigned long pos, unsigned long *a, unsigned long *b)
{
double s;
double i = floor(sqrt(2.0 * pos)) - 1.0;
if (i <= 1.0) {
i = 1.0;
}
s = i * (i - 1.0) / 2.0;
while (pos - s >= i) {
s += i;
i += 1;
}
*a = (unsigned long)(pos - s);
*b = (unsigned long)(i);
}
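/*
 * k_combination2() maps a linear index onto the pair (a, b) with a < b,
 * enumerated in colexicographic order:
 *     pos 0 -> (0, 1), pos 1 -> (0, 2), pos 2 -> (1, 2), pos 3 -> (0, 3), ...
 * This lets the flat OpenMP loop below cover all "n choose 2" pairs
 * without materializing the pair list.
 */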
int main(int argc, char **argv)
{
double timer_clock = 0.0;
unsigned long i = 0;
unsigned long j = 0;
size_t limit = 0;
char *filename_database = NULL;
char **sequences_database = NULL;
size_t *sizes_database = NULL;
size_t seq_count_database = 0;
char *filename_queries = NULL;
char **sequences_queries = NULL;
size_t *sizes_queries = NULL;
size_t seq_count_queries = 0;
char *endptr = NULL;
char *funcname1 = NULL;
char *funcname2 = NULL;
parasail_function_t *function1 = NULL;
parasail_function_t *function2 = NULL;
int c = 0;
char *matrixname = "blosum62";
const parasail_matrix_t *matrix = NULL;
int gap_open = 10;
int gap_extend = 1;
int N = 1;
int saturated = 0;
int smallest_first = 0;
int biggest_first = 0;
int truncate = 0;
int iterations = 1;
int func_cutoff = 0;
int iter = 0;
stats_t stats_time;
stats_clear(&stats_time);
while ((c = getopt(argc, argv, "a:A:c:b:f:q:o:e:slt:i:")) != -1) {
switch (c) {
case 'a':
funcname1 = optarg;
break;
case 'A':
funcname2 = optarg;
break;
case 'b':
matrixname = optarg;
break;
case 'c':
errno = 0;
func_cutoff = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'f':
filename_database = optarg;
break;
case 'q':
filename_queries = optarg;
break;
case 'i':
errno = 0;
iterations = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'o':
errno = 0;
gap_open = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'e':
errno = 0;
gap_extend = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 's':
smallest_first = 1;
break;
case 'l':
biggest_first = 1;
break;
case 't':
errno = 0;
truncate = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case '?':
if (optopt == 'a'
|| optopt == 'b'
|| optopt == 'e'
|| optopt == 'f'
|| optopt == 'i'
|| optopt == 'n'
|| optopt == 'o'
|| optopt == 't')
{
fprintf(stderr,
"Option -%c requires an argument.\n",
optopt);
}
else if (isprint(optopt)) {
fprintf(stderr, "Unknown option `-%c'.\n",
optopt);
}
else {
fprintf(stderr,
"Unknown option character `\\x%x'.\n",
optopt);
}
exit(1);
default:
fprintf(stderr, "default case in getopt\n");
exit(1);
}
}
if (smallest_first && biggest_first) {
fprintf(stderr, "cannot choose both smallest and biggest first\n");
exit(1);
}
/* select the function */
if (funcname1) {
function1 = parasail_lookup_function(funcname1);
#if HAVE_SSE2
if (NULL == function1) {
if (0 == strcmp(funcname1, "ssw_16")) {
function1 = parasail_ssw_16;
}
else if (0 == strcmp(funcname1, "ssw_8")) {
function1 = parasail_ssw;
}
}
#endif
if (NULL == function1) {
fprintf(stderr, "Specified function1 not found.\n");
exit(1);
}
}
else {
fprintf(stderr, "No alignment function1 specified.\n");
exit(1);
}
if (funcname2) {
function2 = parasail_lookup_function(funcname2);
if (NULL == function2) {
fprintf(stderr, "Specified function2 not found.\n");
exit(1);
}
}
if (func_cutoff > 0 && NULL == function2) {
fprintf(stderr, "-c cutoff requires a second function (-A)\n");
exit(1);
}
/* select the substitution matrix */
if (matrixname) {
matrix = parasail_matrix_lookup(matrixname);
if (NULL == matrix) {
fprintf(stderr, "Specified substitution matrix not found.\n");
exit(1);
}
}
if (filename_database) {
parse_sequences(filename_database, &sequences_database, &sizes_database, &seq_count_database);
}
else {
fprintf(stderr, "missing database filename\n");
exit(1);
}
limit = binomial_coefficient(seq_count_database, 2);
//printf("%lu choose 2 is %lu\n", seq_count_database, limit);
#if defined(_OPENMP)
#pragma omp parallel
{
#pragma omp single
{
N = omp_get_num_threads();
//printf("omp_get_num_threads()=%d\n", N);
}
}
#endif
if (filename_queries) {
parse_sequences(filename_queries,
&sequences_queries, &sizes_queries, &seq_count_queries);
double total_timer = 0.0;
for (i=0; i<seq_count_queries; ++i) {
int saturated_query = 0;
double local_timer = 0.0;
parasail_function_t *function = function1;
if (func_cutoff > 0) {
if (sizes_queries[i] > (unsigned long)func_cutoff) {
function = function2;
}
}
local_timer = timer_real();
#pragma omp parallel
{
#pragma omp for schedule(guided)
for (j=0; j<seq_count_database; ++j) {
parasail_result_t *result = function(
sequences_queries[i], sizes_queries[i],
sequences_database[j], sizes_database[j],
gap_open, gap_extend, matrix);
#pragma omp atomic
saturated_query += result->saturated;
parasail_result_free(result);
}
}
local_timer = timer_real() - local_timer;
total_timer += local_timer;
printf("%lu\t %lu\t %d\t %f\n",
i, (unsigned long)sizes_queries[i],
saturated_query, local_timer);
fflush(stdout);
}
printf("total_time=%f\n", total_timer);
fflush(stdout);
}
else {
for (iter=0; iter<iterations; ++iter) {
timer_clock = timer_real();
#pragma omp parallel
{
unsigned long a=0;
unsigned long b=1;
unsigned long swap=0;
#pragma omp for schedule(guided)
for (i=0; i<limit; ++i) {
parasail_function_t *function = function1;
parasail_result_t *result = NULL;
unsigned long query_size;
k_combination2(i, &a, &b);
if (smallest_first) {
if (sizes_database[a] > sizes_database[b]) {
swap = a;
a = b;
b = swap;
}
}
else if (biggest_first) {
if (sizes_database[a] < sizes_database[b]) {
swap = a;
a = b;
b = swap;
}
}
query_size = sizes_database[a];
if (truncate > 0) {
if (query_size > (unsigned long)truncate) {
query_size = truncate;
}
}
if (func_cutoff > 0) {
if (query_size > (unsigned long)func_cutoff) {
function = function2;
}
}
result = function(
sequences_database[a], query_size,
sequences_database[b], sizes_database[b],
gap_open, gap_extend, matrix);
#pragma omp atomic
saturated += result->saturated;
parasail_result_free(result);
}
}
timer_clock = timer_real() - timer_clock;
stats_sample_value(&stats_time, timer_clock);
}
printf("%s\t %s\t %d\t %d\t %d\t %d\t %f\t %f\t %f\t %f\n",
funcname1, matrixname, gap_open, gap_extend, N,
saturated,
stats_time._mean, stats_stddev(&stats_time),
stats_time._min, stats_time._max);
fflush(stdout);
}
return 0;
}
|
onePunch.c | #include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits.h>
#define MAX 10000
#define NOT_CONNECTED (INT_MAX)
int diameter(int distance[MAX][MAX], int nodesCount);
int distance[MAX][MAX];
/* initialize all distances to NOT_CONNECTED */
void Initialize() {
for (int i = 0; i < MAX; ++i) {
for (int j = 0; j < MAX; ++j) {
distance[i][j] = NOT_CONNECTED;
}
distance[i][i] = 0;
}
}
uint64_t GetTimeStamp() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * (uint64_t) 1000000 + tv.tv_usec;
}
int main() {
/* number of nodes */
int nodeCount;
/* Number of edges */
int m;
Initialize();
/* get the node count */
if (scanf("%d", &nodeCount) < 1) {
fprintf(stderr, "Error reading node count\n");
exit(1);
}
if (nodeCount < 1 || nodeCount > MAX) {
fprintf(stderr, "Invalid number of nodes, %d. Must be 1 to %d\n",
nodeCount, MAX);
exit(1);
}
/* edge count */
if (scanf("%d", &m) < 1) {
fprintf(stderr, "Error reading edge count\n");
exit(1);
}
if (m < nodeCount - 1 || m > nodeCount * (nodeCount - 1)) {
fprintf(stderr, "Invalid number of edges, %d. Must be %d to %d\n",
m, nodeCount - 1, nodeCount * (nodeCount - 1));
exit(1);
}
while (m--) {
/* nodes - let the indexation begin from 0 */
int a, b;
/* edge weight */
int c;
if (scanf("%d %d %d", &a, &b, &c) < 3) {
fprintf(stderr, "Error reading edge\n");
exit(1);
}
if (a < 0 || a >= nodeCount || b < 0 || b >= nodeCount || c <= 0) {
fprintf(stderr, "Invalid edge: from %d to %d weight %d\n", a, b, c);
exit(1);
}
distance[a][b] = c;
}
uint64_t start = GetTimeStamp();
printf("Diameter %d\n", diameter(distance, nodeCount));
printf("Time: %ld us\n", (uint64_t) (GetTimeStamp() - start));
//
return 0;
}
/******************************************************************************/
/* Your changes here */
#include "omp.h"
int localVertexCount;
//int localDistance[MAX][MAX];
//#pragma omp threadprivate(localVertexCount,localDistance)
#pragma omp threadprivate(localVertexCount)
int *Dijkstra(int fromVertex, int vertexCount, int graph[MAX][MAX]);
int diameter(int givenDistance[MAX][MAX], int vertexCount) {
int *distancesTable[vertexCount];
localVertexCount = vertexCount;
#pragma omp parallel for copyin(localVertexCount)
for (int fromVertex = 0; fromVertex < vertexCount; ++fromVertex) {
distancesTable[fromVertex] = Dijkstra(fromVertex, localVertexCount, givenDistance);
}
int diameter = -1;
for (int i = 0; i < vertexCount; ++i) {
int maxDistance = 0;
for (int j = 0; j < localVertexCount; ++j) {
if (*(distancesTable[i] + j) > maxDistance && *(distancesTable[i] + j) != NOT_CONNECTED)
maxDistance = *(distancesTable[i] + j);
}
if (maxDistance > diameter) {
diameter = maxDistance;
}
}
/* release the per-source distance arrays allocated by Dijkstra() */
for (int i = 0; i < vertexCount; ++i) {
free(distancesTable[i]);
}
return diameter;
}
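/*
 * Parallelization note: each source vertex runs an independent Dijkstra,
 * so the loop above needs no synchronization; copyin() seeds every
 * thread's threadprivate localVertexCount from the master copy. With an
 * O(V^2) Dijkstra per source, the whole diameter computation is
 * O(V^3 / nthreads).
 */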
int *Dijkstra(int fromVertex, int vertexCount, int graph[MAX][MAX]) {
int visitedVertex[vertexCount];
// int distancesOfThisVertex[vertexCount];
int *distancesOfThisVertex = malloc(vertexCount * sizeof(int));
int minEdge, vertex = 0, searchedEdgesCount = 0;
if (distancesOfThisVertex == NULL) {
fprintf(stderr, "malloc failed\n");
exit(1);
}
for (int i = 0; i < vertexCount; ++i) {
visitedVertex[i] = 0;
distancesOfThisVertex[i] = graph[fromVertex][i];
}
/* mark the source visited after the loop so the flag is not overwritten */
visitedVertex[fromVertex] = 1;
distancesOfThisVertex[fromVertex] = 0;
while (searchedEdgesCount < vertexCount - 1) {
searchedEdgesCount++;
minEdge = NOT_CONNECTED;
for (int i = 0; i < vertexCount; ++i) {
if (visitedVertex[i] == 0 && minEdge > distancesOfThisVertex[i]) {
vertex = i;
minEdge = distancesOfThisVertex[i];
}
}
visitedVertex[vertex] = 1;
for (int i = 0; i < vertexCount; ++i) {
if (visitedVertex[i] == 0 && graph[vertex][i] != NOT_CONNECTED &&
distancesOfThisVertex[vertex] != NOT_CONNECTED &&
distancesOfThisVertex[vertex] + graph[vertex][i] < distancesOfThisVertex[i]) {
distancesOfThisVertex[i] = distancesOfThisVertex[vertex] + graph[vertex][i];
}
}
}
return distancesOfThisVertex;
}
/* Compile with OpenMP enabled so the pragmas above take effect, e.g. */
/* gcc -O2 -fopenmp onePunch.c -o onePunch */
memdbg.h | /* ****** NOTE ******
* This header file should be the LAST header file included within every
* .c file within the project. If there are .h files that have actual
* code in them, then this header should be the last include within that
* .h file, and that .h file should be the last one included within the
* .c file.
* ****** NOTE *****
*/
#if !defined (__MEM_DBG_H_)
#define __MEM_DBG_H_
// values to use within the MemDbg_Validate() function.
#define MEMDBG_VALIDATE_MIN 0
#define MEMDBG_VALIDATE_DEEP 1
#define MEMDBG_VALIDATE_DEEPER 2
#define MEMDBG_VALIDATE_DEEPEST 3
#include <stdio.h>
#include <stdlib.h>
#include "os.h"
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include "memory.h"
#if defined (MEMDBG_ON)
/*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
/*
* memdbg.h
* Memory management debugging (at runtime)
*
 * memdbg contains routines to detect and report memory
 * problems, such as double frees, passing bad pointers to
 * free, and most buffer overwrites. Tracking of non-freed
 * data can also be enabled, to expose memory leaks.
*
* Compilation Options (provided from Makefile CFLAGS)
*
* MEMDBG_ON If this is NOT defined, then memdbg will
* get out of your way, and most normal memory functions
* will be called with no overhead at all.
*/
/* these functions can be called by client code. Normally Memdbg_Used() and
* MemDbg_Display() would be called at program exit. That will dump a list
* of any memory that was not released. The MemDbg_Validate() can be called
* pretty much any time. That function will walk the memory allocation linked
 * lists, and squawk if there are problems, such as overwrites, freed memory that
* has been written to, etc. It would likely be good to call MemDbg_Validate()
* within benchmarking, after every format is tested.
*
* TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display()
* and a function to get the 'current' state of memory as a handle. Thus, a
* format self test could get a handle BEFORE starting, and then check after, and
* ONLY show leaked memory from the time the handle was obtained, which was at the
* start of the self test. Thus it would only show leaks from that format test.
*
* These functions are NOT thread safe. Do not call them within OMP blocks of code.
* Normally, these would be called at program exit, or within things like format
* self test code, etc, and not within OMP. But this warning is here, so that
* it is known NOT to call within OMP.
*/
extern size_t MemDbg_Used(int show_freed);
extern void MemDbg_Display(FILE *);
extern void MemDbg_Validate(int level);
extern void MemDbg_Validate_msg(int level, const char *pMsg);
extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData);
/* these functions should almost NEVER be called by any client code. They
* are listed here, because the macros need to know their names. Client code
* should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc()
* If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this
* function would not be declared here, AND at link time, the function would
* not be found.
 * NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic).
 * Also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is
 * very slow, and the required thread safety makes it even slower. This applies not only
 * to these functions here, BUT to malloc/free in general within OMP blocks. AVOID doing
 * that at almost all costs, and performance will usually go up.
*/
extern void *MEMDBG_alloc(size_t, char *, int);
extern void *MEMDBG_alloc_align(size_t, int, char *, int);
extern void *MEMDBG_calloc(size_t count, size_t, char *, int);
extern void *MEMDBG_realloc(void *, size_t, char *, int);
extern void MEMDBG_free(const void *, char *, int);
extern char *MEMDBG_strdup(const char *, char *, int);
#if !defined(__MEMDBG_C_FILE__)
/* we get here on every file compiled EXCEPT memdbg.c */
#undef malloc
#undef realloc
#undef free
#undef strdup
#undef libc_free
#undef libc_calloc
#undef libc_malloc
#define libc_free(a) MEMDBG_libc_free(a)
#define libc_malloc(a) MEMDBG_libc_alloc(a)
#define libc_calloc(a,b) MEMDBG_libc_calloc(a,b)
#define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__)
#define calloc(a,b) MEMDBG_calloc(a,b,__FILE__,__LINE__)
#define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__)
#define free(a) MEMDBG_free((a),__FILE__,__LINE__)
#define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__)
#endif /* !defined __MEMDBG_C_FILE__ */
/* pass the file handle to write to (normally stderr) */
#define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \
if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \
MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0)
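/* Hypothetical usage sketch (not part of this header): at the end of
 * main(), a client would typically call
 *     MEMDBG_PROGRAM_EXIT_CHECKS(stderr);
 * which dumps any unfreed allocations and runs the deepest validation
 * pass; when MEMDBG_ON is not defined the call compiles away entirely.
 */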
typedef struct MEMDBG_HANDLE_t {
unsigned id;
unsigned alloc_cnt;
size_t mem_size;
} MEMDBG_HANDLE;
/*
* these functions give a caller some of the INSIDE information about the
* allocated object. We simply return data from inside the memdbg header.
* NOTE, if fence post is not valid, we still return something, BUT will
* also return something in the err_msg stating this may not be valid.
*/
/* The count 'id' of an allocated block. Same as used in leak report */
unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg);
/* the size allocated of the contained block */
size_t MEMDBG_get_size(const void *ptr, const char **err_msg);
/* what file (source) did the allocation */
const char *MEMDBG_get_file(const void *ptr, const char **err_msg);
/* what file (source) line number did the allocation */
unsigned MEMDBG_get_line(const void *ptr, const char **err_msg);
/*
* these functions allow taking a memory snapshot, calling some code, then validating that memory
* is the same after the code. This will help catch memory leaks and other such problems, within
* formats and such. Simply get the snapshot, run self tests (or other), when it exits, check
* the snapshot to make sure nothing leaked.
*/
/* returning a struct (or passing it as a param) is not super efficient, but this is done so infrequently that it is not an issue. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id);
/* will not exit on leaks. Does exit, on memory overwrite corruption. */
void MEMDBG_checkSnapshot(MEMDBG_HANDLE);
/* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks);
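/* Hypothetical usage sketch, following the comment above: bracket a
 * format self test with a snapshot to catch leaks local to that test:
 *     MEMDBG_HANDLE h = MEMDBG_getSnapshot(1);
 *     run_self_test();
 *     MEMDBG_checkSnapshot_possible_exit_on_error(h, 0);
 */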
/*
* the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate
* so it is not flagged as a leak, by these HANDLE snapshot functions. 'tiny' memory is expected
* to leak, until program exit. At that time, any that was not freed, will be shown as leaked.
* THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe.
*/
void MEMDBG_tag_mem_from_alloc_tiny(void *);
extern void MEMDBG_libc_free(void *);
extern void *MEMDBG_libc_alloc(size_t size);
extern void *MEMDBG_libc_calloc(size_t count, size_t size);
#else
#define libc_alloc malloc
#define libc_calloc calloc
#define libc_malloc malloc
#define libc_free free
#define MemDbg_Used(a) 0
#define MemDbg_Display(a)
#define MemDbg_Validate(a)
#define MemDbg_Validate_msg(a,b)
#define MemDbg_Validate_msg2(a,b,c)
#define MEMDBG_PROGRAM_EXIT_CHECKS(a)
#define MEMDBG_tag_mem_from_alloc_tiny(a)
#define MEMDBG_HANDLE int
#define MEMDBG_getSnapshot(a) 0
#define MEMDBG_checkSnapshot(a) if (a) printf(" \b")
#define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if (a) printf(" \b")
#endif /* MEMDBG_ON */
#endif /* __MEMDBG_H_ */
|
mirbooking-broker.c | #include "mirbooking-broker.h"
#include "mirbooking-score-table-private.h"
#include <stdlib.h>
#include <math.h>
#include <pb.h>
#include <string.h>
#include <odeint.h>
#if HAVE_OPENMP
#include <omp.h>
#endif
#include <sparse.h>
#include <stdio.h>
#if HAVE_MKL_CBLAS
#include <mkl_cblas.h>
#elif HAVE_OPENBLAS
#include <openblas/cblas.h>
#else
#include <cblas.h>
#endif
#define RTOL 1e-6
#define ATOL 1e-8
#define COALESCE(x,d) (x == NULL ? (d) : (x))
typedef struct _MirbookingTargetPositions
{
gsize *positions;
gsize positions_len;
GPtrArray *occupants;
} MirbookingTargetPositions;
static void
mirbooking_target_positions_clear (MirbookingTargetPositions *tss)
{
g_free (tss->positions);
g_ptr_array_unref (tss->occupants);
}
typedef struct
{
gint rank;
gsize prime5_footprint;
gsize prime3_footprint;
MirbookingScoreTable *score_table;
GPtrArray *targets;
GPtrArray *mirnas;
GHashTable *quantification; // #MirbookingSequence -> #gfloat (initial quantity)
/* whether or not the system has been initialized */
gsize init;
/* all the target sites, stored contiguously */
GArray *target_sites;
GHashTable *target_sites_by_target;
GPtrArray *target_sites_by_target_index;
GArray *target_positions; // (target, mirna) -> positions, scores and occupants in the target
GArray *occupants;
/* transcription */
gdouble *targets_ktr;
gdouble *mirnas_ktr;
/* state of the system */
/* state of the system, which corresponds to the concatenation of the
* various concentration vectors
*
* [E] [S] [ES] [P]
*/
gdouble t;
gdouble *y;
gsize y_len;
/* shortcuts over 'y' */
gdouble *E; // len(mirnas)
gdouble *S; // len(targets)
gdouble *ES; // len(occupants)
gdouble *P; // len(S)
/* odeint */
OdeIntIntegrator *integrator;
gdouble *F;
/* shortcuts over 'F' */
gdouble *dEdt;
gdouble *dSdt;
gdouble *dESdt;
gdouble *dPdt;
/* steady-state solver */
MirbookingBrokerSparseSolver sparse_solver;
SparseSolver *solver;
/* for compactness and efficiency, the Jacobian is only defined over [ES] */
SparseMatrix *J; // len(ES) * len(ES)
gdouble *ES_delta; // len(ES)
} MirbookingBrokerPrivate;
struct _MirbookingBroker
{
GObject parent_instance;
MirbookingBrokerPrivate *priv;
};
G_DEFINE_TYPE_WITH_PRIVATE (MirbookingBroker, mirbooking_broker, G_TYPE_OBJECT)
static void
mirbooking_broker_init (MirbookingBroker *self)
{
self->priv = g_new0 (MirbookingBrokerPrivate, 1);
self->priv->targets = g_ptr_array_new_with_free_func (g_object_unref);
self->priv->mirnas = g_ptr_array_new_with_free_func (g_object_unref);
self->priv->quantification = g_hash_table_new ((GHashFunc) mirbooking_sequence_hash,
(GEqualFunc) mirbooking_sequence_equal);
}
enum
{
PROP_RANK = 1,
PROP_5PRIME_FOOTPRINT,
PROP_3PRIME_FOOTPRINT,
PROP_SCORE_TABLE,
PROP_SPARSE_SOLVER
};
static void
mirbooking_broker_set_property (GObject *object, guint property_id, const GValue *value, GParamSpec *pspec)
{
MirbookingBroker *self = MIRBOOKING_BROKER (object);
switch (property_id)
{
case PROP_RANK:
self->priv->rank = g_value_get_int (value);
break;
case PROP_5PRIME_FOOTPRINT:
self->priv->prime5_footprint = g_value_get_uint (value);
break;
case PROP_3PRIME_FOOTPRINT:
self->priv->prime3_footprint = g_value_get_uint (value);
break;
case PROP_SCORE_TABLE:
mirbooking_broker_set_score_table (self, g_value_get_object (value));
break;
case PROP_SPARSE_SOLVER:
mirbooking_broker_set_sparse_solver (self, g_value_get_enum (value));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
}
}
static void
mirbooking_broker_get_property (GObject *object, guint property_id, GValue *value, GParamSpec *pspec)
{
MirbookingBroker *self = MIRBOOKING_BROKER (object);
switch (property_id)
{
case PROP_RANK:
g_value_set_int (value, self->priv->rank);
break;
case PROP_5PRIME_FOOTPRINT:
g_value_set_uint (value, self->priv->prime5_footprint);
break;
case PROP_3PRIME_FOOTPRINT:
g_value_set_uint (value, self->priv->prime3_footprint);
break;
case PROP_SCORE_TABLE:
g_value_set_object (value, self->priv->score_table);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
}
}
static size_t
_mirbooking_broker_get_occupant_index (MirbookingBroker *self, const MirbookingOccupant *occupant)
{
return occupant - &g_array_index (self->priv->occupants, MirbookingOccupant, 0);
}
gdouble
_mirbooking_broker_get_occupant_quantity (MirbookingBroker *self, const MirbookingOccupant *occupant, const gdouble *ES)
{
gsize k = _mirbooking_broker_get_occupant_index (self, occupant);
g_assert_cmpint (k, >=, 0);
g_assert_cmpint (k, <, self->priv->occupants->len);
return ES[k];
}
gdouble
mirbooking_broker_get_occupant_quantity (MirbookingBroker *self, const MirbookingOccupant *occupant)
{
g_return_val_if_fail (self->priv->init, 0.0);
return _mirbooking_broker_get_occupant_quantity (self, occupant, self->priv->ES);
}
#if !GLIB_CHECK_VERSION(2,54,0)
static gboolean
g_ptr_array_find_with_equal_func (GPtrArray *array, gconstpointer elem, GEqualFunc equal_func, guint *index)
{
guint i;
for (i = 0; i < array->len; i++)
{
if (equal_func (g_ptr_array_index (array, i), elem))
{
if (index)
*index = i;
return TRUE;
}
}
return FALSE;
}
#endif
/**
* mirbooking_broker_set_occupant_quantity:
* @self: A #MirbookingBroker
* @occupant: A #MirbookingOccupant previously obtained from #mirbooking_broker_get_target_sites
* @quantity: The new quantity of that occupant
*
* Set the concentration of an occupant to the given value.
*/
void
mirbooking_broker_set_occupant_quantity (MirbookingBroker *self, const MirbookingOccupant *occupant, gdouble quantity)
{
g_return_if_fail (self->priv->init);
g_return_if_fail (quantity >= 0);
guint i;
g_return_if_fail (g_ptr_array_find_with_equal_func (self->priv->mirnas, occupant->mirna, (GEqualFunc) mirbooking_sequence_equal, &i));
gsize k = _mirbooking_broker_get_occupant_index (self, occupant);
self->priv->ES[k] = quantity;
}
static void
mirbooking_target_site_clear (MirbookingTargetSite *self)
{
g_object_unref (self->target);
g_slist_free_full (self->occupants, (GDestroyNotify) mirbooking_occupant_clear);
}
static void
mirbooking_broker_finalize (GObject *object)
{
MirbookingBroker *self = MIRBOOKING_BROKER (object);
if (self->priv->score_table)
{
g_object_unref (self->priv->score_table);
}
g_hash_table_unref (self->priv->quantification);
g_ptr_array_unref (self->priv->targets);
g_ptr_array_unref (self->priv->mirnas);
if (self->priv->target_sites)
{
g_array_unref (self->priv->target_sites);
g_hash_table_unref (self->priv->target_sites_by_target);
g_ptr_array_unref (self->priv->target_sites_by_target_index);
g_array_unref (self->priv->target_positions);
g_array_unref (self->priv->occupants);
}
if (self->priv->integrator)
{
g_free (self->priv->y);
g_free (self->priv->F);
g_free (self->priv->targets_ktr);
g_free (self->priv->mirnas_ktr);
odeint_integrator_free (self->priv->integrator);
}
if (self->priv->J)
{
sparse_matrix_clear (self->priv->J);
g_free (self->priv->J);
}
if (self->priv->ES_delta)
g_free (self->priv->ES_delta);
sparse_solver_free (self->priv->solver);
g_free (self->priv);
G_OBJECT_CLASS (mirbooking_broker_parent_class)->finalize (object);
}
static void
mirbooking_broker_class_init (MirbookingBrokerClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->set_property = mirbooking_broker_set_property;
object_class->get_property = mirbooking_broker_get_property;
object_class->finalize = mirbooking_broker_finalize;
g_object_class_install_property (object_class, PROP_RANK,
g_param_spec_int ("rank", "", "", 0, G_MAXINT, 0, G_PARAM_CONSTRUCT_ONLY | G_PARAM_READWRITE));
g_object_class_install_property (object_class, PROP_5PRIME_FOOTPRINT,
g_param_spec_uint ("prime5-footprint", "", "", 0, G_MAXUINT, MIRBOOKING_BROKER_DEFAULT_5PRIME_FOOTPRINT, G_PARAM_CONSTRUCT | G_PARAM_READWRITE));
g_object_class_install_property (object_class, PROP_3PRIME_FOOTPRINT,
g_param_spec_uint ("prime3-footprint", "", "", 0, G_MAXUINT, MIRBOOKING_BROKER_DEFAULT_3PRIME_FOOTPRINT, G_PARAM_CONSTRUCT | G_PARAM_READWRITE));
g_object_class_install_property (object_class, PROP_SCORE_TABLE,
g_param_spec_object ("score-table", "", "", MIRBOOKING_TYPE_SCORE_TABLE, G_PARAM_CONSTRUCT | G_PARAM_READWRITE));
g_object_class_install_property (object_class, PROP_SPARSE_SOLVER,
g_param_spec_enum ("sparse-solver", "", "", MIRBOOKING_BROKER_SPARSE_SOLVER_ENUM, MIRBOOKING_BROKER_DEFAULT_SPARSE_SOLVER, G_PARAM_CONSTRUCT | G_PARAM_READWRITE));
}
/**
* mirbooking_broker_new:
*
* Returns: (transfer full): A plain #Mirbooking instance
*/
MirbookingBroker *
mirbooking_broker_new (void)
{
return g_object_new (MIRBOOKING_BROKER_TYPE, NULL);
}
/**
* mirbooking_broker_new_with_rank:
*
* Returns: (transfer full): A plain #Mirbooking instance with specified rank
*/
MirbookingBroker *
mirbooking_broker_new_with_rank (gint rank)
{
return g_object_new (MIRBOOKING_BROKER_TYPE, "rank", rank, NULL);
}
/**
* mirbooking_broker_get_rank:
*
* Obtain the rank of this broker in a distributed context.
*/
gint
mirbooking_broker_get_rank (MirbookingBroker *self)
{
return self->priv->rank;
}
void
mirbooking_broker_set_5prime_footprint (MirbookingBroker *self,
gsize footprint)
{
self->priv->prime5_footprint = footprint;
}
void
mirbooking_broker_set_3prime_footprint (MirbookingBroker *self,
gsize footprint)
{
self->priv->prime3_footprint = footprint;
}
void
mirbooking_broker_set_sparse_solver (MirbookingBroker *self,
MirbookingBrokerSparseSolver sparse_solver)
{
if (self->priv->solver == NULL || self->priv->sparse_solver != sparse_solver)
{
if (self->priv->solver)
{
sparse_solver_free (self->priv->solver);
}
self->priv->sparse_solver = sparse_solver;
self->priv->solver = sparse_solver_new ((SparseSolverMethod) sparse_solver);
g_return_if_fail (self->priv->solver != NULL);
g_object_notify (G_OBJECT (self), "sparse-solver");
}
}
/**
* mirbooking_broker_get_score_table:
 * Obtain the #MirbookingScoreTable used by this broker for computing duplex scores.
*
* Returns: (transfer none)
*/
MirbookingScoreTable *
mirbooking_broker_get_score_table (MirbookingBroker *self)
{
return self->priv->score_table;
}
void
mirbooking_broker_set_score_table (MirbookingBroker *self, MirbookingScoreTable *score_table)
{
g_return_if_fail (!self->priv->init);
if (score_table != self->priv->score_table)
{
g_clear_object (&self->priv->score_table);
self->priv->score_table = g_object_ref (score_table);
g_object_notify (G_OBJECT (self), "score-table");
}
}
union gfloatptr
{
gfloat f;
gpointer p;
};
static gfloat
gfloat_from_gpointer (gpointer ptr)
{
union gfloatptr flt = { .p = ptr };
return flt.f;
}
static gpointer
gpointer_from_gfloat (gfloat flt)
{
union gfloatptr ptr = { .f = flt };
return ptr.p;
}
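/*
 * The union above type-puns a gfloat into a gpointer so float quantities
 * can be stored directly as GHashTable values without boxing. This relies
 * on sizeof (gfloat) <= sizeof (gpointer), which holds on the 32- and
 * 64-bit platforms targeted here.
 */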
/**
* mirbooking_broker_get_bound_mirna_quantity:
*
* Obtain the bound quantity of @mirna.
*/
gdouble
mirbooking_broker_get_bound_mirna_quantity (MirbookingBroker *self, MirbookingMirna *mirna)
{
g_return_val_if_fail (self->priv->init, 0.0);
gdouble bound_quantity = 0;
gsize k;
#pragma omp parallel for reduction(+:bound_quantity)
for (k = 0; k < self->priv->occupants->len; k++)
{
MirbookingOccupant *occupant = &g_array_index (self->priv->occupants, MirbookingOccupant, k);
if (occupant->mirna == mirna)
{
bound_quantity += self->priv->ES[_mirbooking_broker_get_occupant_index (self, occupant)];
}
}
return bound_quantity;
}
/**
* mirbooking_broker_get_sequence_quantity:
* @sequence: A #MirbookingSequence to retrieve quantity
*
* Retrieve the free quantity of @sequence.
*/
gdouble
mirbooking_broker_get_sequence_quantity (MirbookingBroker *self, MirbookingSequence *sequence)
{
g_return_val_if_fail (g_hash_table_contains (self->priv->quantification, sequence), 0.0);
if (self->priv->init)
{
if (MIRBOOKING_IS_MIRNA (sequence))
{
guint i;
if (g_ptr_array_find_with_equal_func (self->priv->mirnas, sequence, (GEqualFunc) mirbooking_sequence_equal, &i))
{
return self->priv->E[i];
}
}
else
{
guint i;
if (g_ptr_array_find_with_equal_func (self->priv->targets, sequence, (GEqualFunc) mirbooking_sequence_equal, &i))
{
return self->priv->S[i];
}
}
}
else
{
return gfloat_from_gpointer (g_hash_table_lookup (self->priv->quantification, sequence));
}
g_return_val_if_reached (0.0);
}
/**
* mirbooking_broker_get_product_quantity:
*/
gdouble
mirbooking_broker_get_product_quantity (MirbookingBroker *self, MirbookingTarget *target)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i), 0.0);
return self->priv->P[i];
}
/**
* mirbooking_broker_set_product_quantity:
*/
void
mirbooking_broker_set_product_quantity (MirbookingBroker *self, MirbookingTarget *target, gdouble quantity)
{
g_return_if_fail (self->priv->init);
g_return_if_fail (quantity >= 0);
guint i;
g_return_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i));
self->priv->P[i] = quantity;
}
/**
 * mirbooking_broker_set_sequence_quantity:
* @sequence: A #MirbookingSequence being quantified for the
* upcoming execution
*
* Set the free concentration of @sequence to @quantity. If @sequence is not
* part of the system, it is added.
*
* The concentration of a #MirbookingMirna can be set to a negative value, as
* long as the total amount present (see #mirbooking_broker_get_bound_mirna_quantity)
* is positive.
*
* Note that no new sequence can be added this way once
* #mirbooking_broker_evaluate has been called.
*/
void
mirbooking_broker_set_sequence_quantity (MirbookingBroker *self, MirbookingSequence *sequence, gdouble quantity)
{
g_return_if_fail (MIRBOOKING_IS_MIRNA (sequence) || MIRBOOKING_IS_TARGET (sequence));
g_return_if_fail (self->priv->init == 0 || g_hash_table_contains (self->priv->quantification, sequence));
g_return_if_fail (MIRBOOKING_IS_MIRNA (sequence) || quantity >= 0);
/* update the system */
// TODO: have reverse-index for these use cases
if (self->priv->init)
{
if (MIRBOOKING_IS_MIRNA (sequence))
{
g_return_if_fail (quantity + mirbooking_broker_get_bound_mirna_quantity (self, MIRBOOKING_MIRNA (sequence)) >= 0);
guint i;
if (g_ptr_array_find_with_equal_func (self->priv->mirnas, sequence, (GEqualFunc) mirbooking_sequence_equal, &i))
{
self->priv->E[i] = quantity;
}
}
else
{
guint i;
if (g_ptr_array_find_with_equal_func (self->priv->targets, sequence, (GEqualFunc) mirbooking_sequence_equal, &i))
{
self->priv->S[i] = quantity;
}
}
}
if (!g_hash_table_contains (self->priv->quantification, sequence))
{
if (MIRBOOKING_IS_MIRNA (sequence))
{
g_ptr_array_add (self->priv->mirnas,
g_object_ref (sequence));
}
else
{
g_ptr_array_add (self->priv->targets,
g_object_ref (sequence));
}
}
g_hash_table_insert (self->priv->quantification,
sequence,
gpointer_from_gfloat (quantity));
}
/**
* mirbooking_broker_get_time:
*
* Get the time in seconds of the system.
*
 * It is much more accurate to retrieve the time this way than to keep an
 * external counter, because numerical errors can accumulate when
 * stepping.
*/
gdouble
mirbooking_broker_get_time (MirbookingBroker *self)
{
return self->priv->t;
}
/**
* mirbooking_broker_set_time:
*
* Set the initial time for the numerical integration.
*/
void
mirbooking_broker_set_time (MirbookingBroker *self, gdouble time)
{
g_return_if_fail (!self->priv->init);
self->priv->t = time;
}
/*
 * Compute the footprint window in which two microRNAs can have
 * overlapping footprints at this position.
*/
static void
_mirbooking_broker_get_footprint_window (MirbookingBroker *self,
const MirbookingTargetSite *target_site,
gsize prime5_footprint,
gsize prime3_footprint,
const MirbookingTargetSite **from_target_site,
const MirbookingTargetSite **to_target_site)
{
// find the lower target site
*from_target_site = target_site - MIN (prime5_footprint, target_site->position);
// find the upper target site
*to_target_site = target_site + MIN (prime3_footprint,
mirbooking_sequence_get_sequence_length (MIRBOOKING_SEQUENCE (target_site->target)) - target_site->position - 1);
g_assert ((*from_target_site)->target == target_site->target);
g_assert ((*to_target_site)->target == target_site->target);
}
gdouble
_mirbooking_broker_get_target_site_occupants_quantity (MirbookingBroker *self,
const MirbookingTargetSite *target_site,
const gdouble *ES)
{
gdouble total_ES = 0;
GSList *occupants_list;
for (occupants_list = target_site->occupants; occupants_list != NULL; occupants_list = occupants_list->next)
{
MirbookingOccupant *occupant = occupants_list->data;
total_ES += _mirbooking_broker_get_occupant_quantity (self, occupant, ES);
}
return total_ES;
}
gdouble
_mirbooking_broker_get_target_site_vacancy (MirbookingBroker *self,
const MirbookingTargetSite *target_site,
gsize prime5_footprint,
gsize prime3_footprint,
gdouble St,
const gdouble *ES)
{
gdouble vacancy = 1.0;
const MirbookingTargetSite *from_target_site, *to_target_site;
_mirbooking_broker_get_footprint_window (self,
target_site,
prime5_footprint,
prime3_footprint,
&from_target_site,
&to_target_site);
/*
* Curiously, this corresponds to the following factorization of the joint
* distribution of having all positions simultaneously unbound.
*
* Pr(p_1,p_2,...,p_n) = Pr(p_1) * Pr(p_2|p_1) * ... * Pr(p_n|p_1, p_2,... p_{n-1})
*
* Pr(p_1) is straightforward to calculate.
*
 * For every other position, we compute the vacancy by assuming that
 * every preceding position is unbound as well.
*/
const MirbookingTargetSite *last_tts = NULL;
const MirbookingTargetSite *ts;
for (ts = from_target_site; ts <= to_target_site; ts++)
{
/*
* Here, we invert the footprint because we want to know where other
* occupants can overlap this position.
*/
const MirbookingTargetSite *fts, *tts;
_mirbooking_broker_get_footprint_window (self,
ts,
prime3_footprint,
prime5_footprint,
&fts,
&tts);
gdouble quantity = 0;
const MirbookingTargetSite *nearby_target_site;
for (nearby_target_site = fts; nearby_target_site <= tts; nearby_target_site++)
{
if (nearby_target_site > last_tts)
{
quantity += _mirbooking_broker_get_target_site_occupants_quantity (self, nearby_target_site, ES);
}
}
vacancy *= (1 - (quantity / St));
last_tts = tts;
}
return vacancy;
}
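/*
 * Rough illustration of the factorization above: if the window spans two
 * positions whose overlap windows are disjoint and carry bound quantities
 * q1 and q2, the vacancy is (1 - q1/St) * (1 - q2/St). The last_tts guard
 * ensures each nearby site contributes to only one factor, so no bound
 * quantity is double-counted across the conditional probabilities.
 */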
typedef struct _MirbookingScoredTargetSite
{
MirbookingTargetSite *target_site;
MirbookingScore score;
} MirbookingScoredTargetSite;
static gint
cmp_gsize (gconstpointer a, gconstpointer b)
{
const gsize x = *(const gsize*) a;
const gsize y = *(const gsize*) b;
/* compare instead of subtracting, which could overflow gint */
return (x > y) - (x < y);
}
static gboolean
_mirbooking_broker_prepare_step (MirbookingBroker *self, GError **error)
{
guint64 prepare_begin = g_get_monotonic_time ();
gsize target_sites_len = 0;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (self->priv->score_table != NULL, FALSE);
g_return_val_if_fail (self->priv->solver != NULL, FALSE);
guint i;
#pragma omp parallel for reduction(+:target_sites_len)
for (i = 0; i < self->priv->targets->len; i++)
{
target_sites_len += mirbooking_sequence_get_sequence_length (g_ptr_array_index (self->priv->targets, i));
}
// prepare a contiguous array
self->priv->target_sites = g_array_sized_new (FALSE,
FALSE,
sizeof (MirbookingTargetSite),
target_sites_len);
// automatically clear the target sites
g_array_set_clear_func (self->priv->target_sites,
(GDestroyNotify) mirbooking_target_site_clear);
// bookkeep each target site
self->priv->target_sites_by_target = g_hash_table_new ((GHashFunc) mirbooking_sequence_hash,
(GEqualFunc) mirbooking_sequence_equal);
// initialize sites
for (i = 0; i < self->priv->targets->len; i++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
g_hash_table_insert (self->priv->target_sites_by_target,
target,
&g_array_index (self->priv->target_sites, MirbookingTargetSite, self->priv->target_sites->len));
gsize seq_len = mirbooking_sequence_get_sequence_length (MIRBOOKING_SEQUENCE (target));
gsize position;
for (position = 0; position < seq_len; position++)
{
MirbookingTargetSite target_site;
target_site.target = g_object_ref (target);
target_site.position = position;
target_site.occupants = NULL;
g_array_append_val (self->priv->target_sites, target_site);
}
}
// memoize the target sites in an array-friendly way
self->priv->target_sites_by_target_index = g_ptr_array_new ();
for (i = 0; i < self->priv->targets->len; i++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
MirbookingTargetSite *target_site = g_hash_table_lookup (self->priv->target_sites_by_target, target);
g_ptr_array_add (self->priv->target_sites_by_target_index,
target_site);
}
// memoize score vectors
self->priv->target_positions = g_array_sized_new (FALSE,
FALSE,
sizeof (MirbookingTargetPositions),
self->priv->targets->len * self->priv->mirnas->len);
g_array_set_clear_func (self->priv->target_positions,
(GDestroyNotify) mirbooking_target_positions_clear);
g_array_set_size (self->priv->target_positions,
self->priv->targets->len * self->priv->mirnas->len);
// compute scores
gsize occupants_len = 0;
guint j;
gboolean anyerror = FALSE;
#pragma omp parallel for collapse(2) reduction(+:occupants_len) reduction(|:anyerror)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
MirbookingMirna *mirna = g_ptr_array_index (self->priv->mirnas, j);
MirbookingTargetPositions *seed_positions = &g_array_index (self->priv->target_positions, MirbookingTargetPositions, i * self->priv->mirnas->len + j);
anyerror |= !mirbooking_score_table_compute_positions (MIRBOOKING_SCORE_TABLE (self->priv->score_table),
mirna,
target,
&seed_positions->positions,
&seed_positions->positions_len,
error);
occupants_len += seed_positions->positions_len;
}
}
if (anyerror)
{
return FALSE;
}
g_debug ("Number of duplexes: %lu", occupants_len);
// pre-allocate occupants in contiguous memory
self->priv->occupants = g_array_sized_new (FALSE, FALSE, sizeof (MirbookingOccupant), occupants_len);
g_array_set_size (self->priv->occupants, occupants_len);
// initialize occupants
MirbookingOccupant *occupants = (MirbookingOccupant*) self->priv->occupants->data;
gint k = 0;
#pragma omp parallel for collapse(2) ordered reduction(|:anyerror)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingMirna *mirna = g_ptr_array_index (self->priv->mirnas, j);
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
MirbookingTargetPositions *seed_positions = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
i * self->priv->mirnas->len + j);
gint _k;
#pragma omp ordered
{
_k = k;
k += seed_positions->positions_len;
}
seed_positions->occupants = g_ptr_array_sized_new (seed_positions->positions_len);
guint p;
for (p = 0; p < seed_positions->positions_len; p++)
{
MirbookingTargetSite *target_site = &target_sites[seed_positions->positions[p]];
g_assert_cmpint (target_site->position, ==, seed_positions->positions[p]);
MirbookingScore score = {0};
anyerror |= !mirbooking_score_table_compute_score (self->priv->score_table,
mirna,
target_site->target,
seed_positions->positions[p],
&score,
error);
mirbooking_occupant_init (&occupants[_k + p],
target_site->target,
seed_positions->positions[p],
mirna,
score);
/*
 * Multiple microRNAs might share this target site and prepend
 * to it concurrently, hence the critical section below.
*/
#pragma omp critical
target_site->occupants = g_slist_prepend (target_site->occupants, &occupants[_k + p]);
g_ptr_array_add (seed_positions->occupants, &occupants[_k + p]);
}
}
}
if (anyerror)
{
return FALSE;
}
g_assert_cmpint (self->priv->occupants->len, ==, occupants_len);
self->priv->targets_ktr = g_new0 (gdouble, self->priv->targets->len);
self->priv->mirnas_ktr = g_new0 (gdouble, self->priv->mirnas->len);
self->priv->y_len = self->priv->mirnas->len + self->priv->targets->len + self->priv->occupants->len + self->priv->targets->len;
// state of the system
self->priv->y = g_new0 (gdouble, self->priv->y_len);
// add shortcuts
self->priv->E = self->priv->y;
self->priv->S = self->priv->E + self->priv->mirnas->len;
self->priv->ES = self->priv->S + self->priv->targets->len;
self->priv->P = self->priv->ES + self->priv->occupants->len;
// setup initial conditions
#pragma omp parallel for
for (j = 0; j < self->priv->mirnas->len; j++)
{
gdouble q = gfloat_from_gpointer (g_hash_table_lookup (self->priv->quantification,
g_ptr_array_index (self->priv->mirnas, j)));
self->priv->E[j] = q;
}
#pragma omp parallel for
for (i = 0; i < self->priv->targets->len; i++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
gdouble q = gfloat_from_gpointer (g_hash_table_lookup (self->priv->quantification,
target));
self->priv->S[i] = q;
}
// allocate memory for the integrator and the solver
self->priv->F = g_new0 (gdouble, self->priv->y_len);
// add shortcuts
self->priv->dEdt = self->priv->F;
self->priv->dSdt = self->priv->dEdt + self->priv->mirnas->len;
self->priv->dESdt = self->priv->dSdt + self->priv->targets->len;
self->priv->dPdt = self->priv->dESdt + self->priv->occupants->len;
self->priv->ES_delta = g_new0 (gdouble, self->priv->occupants->len);
// integrator
self->priv->integrator = odeint_integrator_new (ODEINT_METHOD_DORMAND_PRINCE,
&self->priv->t,
self->priv->y,
self->priv->y_len,
ODEINT_INTEGRATOR_DEFAULT_RTOL,
ODEINT_INTEGRATOR_DEFAULT_ATOL);
g_debug ("Prepared the first step in %lums", 1000 * (g_get_monotonic_time () - prepare_begin) / G_USEC_PER_SEC);
return TRUE;
}
static gsize
absdiff (gsize a, gsize b)
{
return MAX (a, b) - MIN (a, b);
}
static gdouble
_compute_kother (MirbookingBroker *self,
gsize i,
gsize position,
const gdouble *ES,
gdouble St)
{
gdouble kother = 0;
guint j;
for (j = 0; j < self->priv->mirnas->len; j++)
{
// all the position of the other microrna
MirbookingTargetPositions *tss = &g_array_index (self->priv->target_positions, MirbookingTargetPositions, self->priv->mirnas->len * i + j);
guint q;
for (q = 0; q < tss->positions_len; q++)
{
if (absdiff (tss->positions[q], position) > (self->priv->prime5_footprint + self->priv->prime3_footprint))
{
MirbookingOccupant *occupant_q = g_ptr_array_index (tss->occupants, q);
kother += occupant_q->score.kcat * (_mirbooking_broker_get_occupant_quantity (self, occupant_q, ES) / St);
}
}
}
return kother;
}
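/*
 * In the kinetic equations below, kother acts as an effective additional
 * clearance rate on a bound complex: it sums, over every other occupied
 * site on the same target that lies outside this occupant's footprint,
 * the catalytic rate weighted by that occupant's bound fraction, so
 * cleavage anywhere else on the transcript also releases this occupant.
 */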
/*
* Compute the system state.
*/
static void
_compute_F (double t, const double *y, double *F, void *user_data)
{
MirbookingBroker *self = user_data;
const gdouble *E = y;
const gdouble *S = E + self->priv->mirnas->len;
const gdouble *ES = S + self->priv->targets->len;
const gdouble *P = ES + self->priv->occupants->len;
gdouble *dEdt = F;
gdouble *dSdt = dEdt + self->priv->mirnas->len;
gdouble *dESdt = dSdt + self->priv->targets->len;
gdouble *dPdt = dESdt + self->priv->occupants->len;
gsize prime5_footprint = self->priv->prime5_footprint;
gsize prime3_footprint = self->priv->prime3_footprint;
// basic transcription and degradation
// dEdt = ktr - KDEGE * [E]
cblas_dcopy (self->priv->mirnas->len,
self->priv->mirnas_ktr,
1,
dEdt,
1);
cblas_daxpy (self->priv->mirnas->len,
-KDEGE,
E,
1,
dEdt,
1);
// dSdt = ktr - KDEGS * [S]
cblas_dcopy (self->priv->targets->len,
self->priv->targets_ktr,
1,
dSdt,
1);
cblas_daxpy (self->priv->targets->len,
-KDEGS,
S,
1,
dSdt,
1);
// dPdt = -KDEGP * P
cblas_dcopy (self->priv->targets->len,
P,
1,
dPdt,
1);
cblas_dscal (self->priv->targets->len,
-KDEGP,
dPdt,
1);
guint i, j;
#pragma omp parallel for collapse(2)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
g_assert (target_sites->target == target);
g_assert_cmpint (target_sites->position, ==, 0);
MirbookingMirna *mirna = g_ptr_array_index (self->priv->mirnas, j);
// fetch free energies for candidate MREs
MirbookingTargetPositions *seed_positions = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
self->priv->mirnas->len * i + j);
guint p;
for (p = 0; p < seed_positions->positions_len; p++)
{
MirbookingOccupant *occupant = g_ptr_array_index (seed_positions->occupants, p);
MirbookingTargetSite *target_site = &target_sites[seed_positions->positions[p]];
gsize k = _mirbooking_broker_get_occupant_index (self, occupant);
g_assert (target_site->target == target);
g_assert_cmpint (target_site->position, ==, seed_positions->positions[p]);
g_assert (occupant->mirna == mirna);
gdouble kf = occupant->score.kf;
gdouble kr = occupant->score.kr;
gdouble kcat = occupant->score.kcat;
gdouble Stp = S[i] * _mirbooking_broker_get_target_site_vacancy (self,
target_site,
prime5_footprint,
prime3_footprint,
S[i],
ES);
gdouble kother = _compute_kother (self, i, seed_positions->positions[p], ES, S[i]);
#pragma omp atomic
dEdt[j] += -kf * E[j] * Stp + kr * ES[k] + kcat * ES[k] + kother * ES[k];
#pragma omp atomic
dSdt[i] += - kcat * ES[k];
dESdt[k] = kf * E[j] * Stp - kr * ES[k] - kcat * ES[k] - kother * ES[k];
#pragma omp atomic
dPdt[i] += kcat * ES[k];
}
}
}
}
static void
_prepare_J (MirbookingBroker *self)
{
self->priv->J = g_new0 (SparseMatrix, 1);
// count nnz entries in the Jacobian
gsize nnz = 0;
guint i, j;
#pragma omp parallel for collapse(2) reduction(+:nnz)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
MirbookingTargetPositions *seed_scores = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
i * self->priv->mirnas->len + j);
guint p;
for (p = 0; p < seed_scores->positions_len; p++)
{
// substitute targets
guint z;
for (z = 0; z < self->priv->targets->len; z++)
{
MirbookingTargetPositions *alternative_seed_scores = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
z * self->priv->mirnas->len + j);
nnz += alternative_seed_scores->positions_len;
}
// substitute miRNAs (excluding this one as we consider it as a substitute target)
nnz += g_slist_length (target_sites[seed_scores->positions[p]].occupants) - 1;
}
}
}
g_debug ("nnz: %lu, sparsity: %.2f%%", nnz, 100.0 * (1.0 - (gdouble) nnz / pow (self->priv->occupants->len, 2)));
size_t shape[2] = {self->priv->occupants->len, self->priv->occupants->len};
sparse_matrix_init (self->priv->J,
self->priv->sparse_solver == MIRBOOKING_BROKER_SPARSE_SOLVER_LAPACK ? SPARSE_MATRIX_STORAGE_DENSE : SPARSE_MATRIX_STORAGE_CSR,
SPARSE_MATRIX_TYPE_DOUBLE,
shape,
nnz);
self->priv->J->hints |= SPARSE_MATRIX_HINT_SYMMETRIC_STRUCTURE;
self->priv->J->hints |= SPARSE_MATRIX_HINT_POSITIVE_DEFINITE;
// initialize the sparse slots beforehand because it is not thread-safe and
// we want to keep them in order for fast access
// TODO: find a way to remove the ordered clause
#pragma omp parallel for collapse(2) ordered
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
MirbookingTargetPositions *seed_scores = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
i * self->priv->mirnas->len + j);
guint p;
for (p = 0; p < seed_scores->positions_len; p++)
{
// footprint interactions
MirbookingTargetSite *target_site = &target_sites[seed_scores->positions[p]];
MirbookingOccupant *occupant = g_ptr_array_index (seed_scores->occupants, p);
gsize colind[self->priv->J->shape[0]];
gsize row_nnz = 0;
// substitute target
guint z;
for (z = 0; z < self->priv->targets->len; z++)
{
MirbookingTargetPositions *alternative_seed_scores = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
z * self->priv->mirnas->len + j);
guint w;
for (w = 0; w < alternative_seed_scores->occupants->len; w++)
{
MirbookingOccupant *other_occupant = g_ptr_array_index (alternative_seed_scores->occupants,
w);
colind[row_nnz++] = _mirbooking_broker_get_occupant_index (self, other_occupant);
}
}
// substitute miRNAs
GSList *occupant_list;
for (occupant_list = target_site->occupants; occupant_list != NULL; occupant_list = occupant_list->next)
{
MirbookingOccupant *other_occupant = occupant_list->data;
if (other_occupant->mirna != g_ptr_array_index (self->priv->mirnas, j))
{
colind[row_nnz++] = _mirbooking_broker_get_occupant_index (self, other_occupant);
}
}
// sort colind
qsort (colind, row_nnz, sizeof (gsize), cmp_gsize);
#pragma omp ordered
sparse_matrix_reserve_range (self->priv->J,
_mirbooking_broker_get_occupant_index (self, occupant),
colind,
row_nnz);
}
}
}
}
/*
* Compute the system Jacobian.
*/
static void
_compute_J (double t, const double *y, SparseMatrix *J, void *user_data)
{
MirbookingBroker *self = user_data;
const gdouble *E = y;
const gdouble *S = y + self->priv->mirnas->len;
const gdouble *ES = S + self->priv->targets->len;
gsize prime5_footprint = self->priv->prime5_footprint;
gsize prime3_footprint = self->priv->prime3_footprint;
guint i, j;
#pragma omp parallel for collapse(2)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
g_assert (target_sites->target == target);
g_assert_cmpint (target_sites->position, ==, 0);
MirbookingMirna *mirna = g_ptr_array_index (self->priv->mirnas, j);
// fetch free energies for candidate MREs
MirbookingTargetPositions *seed_positions = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
self->priv->mirnas->len * i + j);
guint p;
for (p = 0; p < seed_positions->positions_len; p++)
{
MirbookingOccupant *occupant = g_ptr_array_index (seed_positions->occupants, p);
MirbookingTargetSite *target_site = &target_sites[seed_positions->positions[p]];
gsize k = _mirbooking_broker_get_occupant_index (self, occupant);
g_assert (target_site->target == target);
g_assert_cmpint (target_site->position, ==, seed_positions->positions[p]);
g_assert (occupant->mirna == mirna);
gdouble kf = occupant->score.kf;
gdouble kr = occupant->score.kr;
gdouble kcat = occupant->score.kcat;
gdouble Stp = S[i] * _mirbooking_broker_get_target_site_vacancy (self,
target_site,
prime5_footprint,
prime3_footprint,
S[i],
ES);
// substitute target for the microRNA
guint z;
for (z = 0; z < self->priv->targets->len; z++)
{
MirbookingTargetPositions *alternative_seed_positions = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
z * self->priv->mirnas->len + j);
guint w;
for (w = 0; w < alternative_seed_positions->occupants->len; w++)
{
MirbookingOccupant *other_occupant = g_ptr_array_index (alternative_seed_positions->occupants, w);
g_assert (other_occupant->mirna == g_ptr_array_index (self->priv->mirnas, j));
gsize other_k = _mirbooking_broker_get_occupant_index (self, other_occupant);
gdouble kother = 0;
if (occupant == other_occupant)
{
kother = _compute_kother (self, i, seed_positions->positions[p], ES, S[i]);
}
else if (i == z && absdiff (seed_positions->positions[p], alternative_seed_positions->positions[w]) > (self->priv->prime5_footprint + self->priv->prime3_footprint))
{
/*
* Here, we account for the kother if a microRNA is
* shared for the pair of complexes because it's
* essentially free and speeds up the convergence.
*
* Ideally we would do it for all pairs of
* complexes, but that has a combinatorial cost.
*/
kother = occupant->score.kcat * (_mirbooking_broker_get_occupant_quantity (self, occupant, ES) / self->priv->S[i]);
}
gdouble dEdES = -1; // always
gdouble dSdES = (z == i && seed_positions->positions[p] == alternative_seed_positions->positions[w]) ? -1 : 0;
gdouble dESdES = kf * (E[j] * dSdES + Stp * dEdES) - (kr + kcat) * (occupant == other_occupant ? 1 : 0) - kother;
sparse_matrix_set_double (J,
k,
other_k,
-dESdES);
}
}
// substitute miRNA for the target
GSList *other_occupant_list;
for (other_occupant_list = target_site->occupants; other_occupant_list != NULL; other_occupant_list = other_occupant_list->next)
{
MirbookingOccupant *other_occupant = other_occupant_list->data;
gsize other_k = _mirbooking_broker_get_occupant_index (self, other_occupant);
gdouble kother = occupant == other_occupant ? _compute_kother (self, i, seed_positions->positions[p], ES, S[i]) : 0;
gdouble dEdES = occupant->mirna == other_occupant->mirna ? -1 : 0;
gdouble dSdES = -1;
gdouble dESdES = kf * (E[j] * dSdES + Stp * dEdES) - (kr + kcat) * (occupant == other_occupant ? 1 : 0) - kother;
sparse_matrix_set_double (J,
k,
other_k,
-dESdES);
}
}
}
}
}
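/*
 * Reading aid for the entries set above. The residual for complex k is
 * F[k] = dES[k]/dt = kf * E * Stp - (kr + kcat) * ES[k] - kother * ES[k],
 * so the entry stored at (k, other_k) is
 *
 *     J[k][other_k] = -dF[k]/dES[other_k]
 *                   = -(kf * (E * dSdES + Stp * dEdES)
 *                       - (kr + kcat) * [k == other_k] - kother)
 *
 * with dEdES = -1 when both complexes involve the same miRNA and
 * dSdES = -1 when both complexes occupy the same target site.
 */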
/**
* mirbooking_broker_evaluate:
* @self: A #MirbookingBroker
* @error_ratio: (out) (optional): Error-to-tolerance ratio
*
* Evaluate the current state of the system.
*
* Returns: %TRUE if the evaluation is successful, otherwise %FALSE and @error
* is set accordingly
*/
gboolean
mirbooking_broker_evaluate (MirbookingBroker *self,
gdouble *error_ratio,
GError **error)
{
if (g_once_init_enter (&self->priv->init))
{
g_return_val_if_fail (_mirbooking_broker_prepare_step (self, error),
FALSE);
g_once_init_leave (&self->priv->init, 1);
}
_compute_F (self->priv->t,
self->priv->y,
self->priv->F,
self);
if (error_ratio)
{
gdouble _error_ratio = 0;
gsize i;
for (i = 0; i < self->priv->y_len; i++)
{
_error_ratio = fmax (_error_ratio, fabs (self->priv->F[i]) / (RTOL * fabs (self->priv->y[i]) + ATOL));
}
*error_ratio = _error_ratio;
}
return TRUE;
}
/**
* mirbooking_broker_step:
* @self: A #MirbookingBroker
* @step_mode: A #MirbookingBrokerStepMode
* @step_size: A step size for integration or a fraction of the step for the
* Newton-Raphson update
*
* Perform a step based on the current state of the system.
*
* Returns: %TRUE on success, otherwise %FALSE and @error is set
*/
gboolean
mirbooking_broker_step (MirbookingBroker *self,
MirbookingBrokerStepMode step_mode,
gdouble step_size,
GError **error)
{
if (g_once_init_enter (&self->priv->init))
{
g_return_val_if_fail (_mirbooking_broker_prepare_step (self, error),
FALSE);
g_once_init_leave (&self->priv->init, 1);
}
if (step_mode == MIRBOOKING_BROKER_STEP_MODE_SOLVE_STEADY_STATE)
{
if (self->priv->rank == 0)
{
_compute_F (self->priv->t,
self->priv->y,
self->priv->F,
self);
if (self->priv->J == NULL)
{
_prepare_J (self);
}
_compute_J (self->priv->t,
self->priv->y,
self->priv->J,
self);
}
gboolean ret;
ret = sparse_solver_solve (self->priv->solver,
self->priv->J,
self->priv->ES_delta,
self->priv->dESdt);
if (!ret)
{
g_set_error_literal (error,
MIRBOOKING_ERROR,
MIRBOOKING_ERROR_FAILED,
"The solve step has failed.");
return FALSE;
}
SparseSolverStatistics stats = sparse_solver_get_statistics (self->priv->solver);
g_debug ("reorder-time: %fs factor-time: %fs solve-time: %fs flops: %f", stats.reorder_time, stats.factor_time, stats.solve_time, stats.flops);
// apply the update
guint i, j;
#pragma omp parallel for collapse(2)
for (i = 0; i < self->priv->targets->len; i++)
{
for (j = 0; j < self->priv->mirnas->len; j++)
{
MirbookingTarget *target = g_ptr_array_index (self->priv->targets, i);
MirbookingTargetSite *target_sites = g_ptr_array_index (self->priv->target_sites_by_target_index,
i);
g_assert (target_sites->target == target);
g_assert_cmpint (target_sites->position, ==, 0);
MirbookingMirna *mirna = g_ptr_array_index (self->priv->mirnas, j);
// fetch free energies for candidate MREs
MirbookingTargetPositions *seed_positions = &g_array_index (self->priv->target_positions,
MirbookingTargetPositions,
self->priv->mirnas->len * i + j);
guint p;
for (p = 0; p < seed_positions->positions_len; p++)
{
MirbookingOccupant *occupant = g_ptr_array_index (seed_positions->occupants, p);
g_assert (occupant->mirna == mirna);
gsize k = _mirbooking_broker_get_occupant_index (self, occupant);
#pragma omp atomic
self->priv->E[j] -= step_size * self->priv->ES_delta[k];
self->priv->ES[k] += step_size * self->priv->ES_delta[k];
}
}
}
// Under the steady-state assumption, all substrate degradation is
// compensated by transcription of new targets.
// However, the system must be reevaluated since we have applied an update.
_compute_F (self->priv->t,
self->priv->y,
self->priv->F,
self);
{
// ktr = ktr - dEdt
cblas_daxpy (self->priv->mirnas->len,
-1,
self->priv->dEdt,
1,
self->priv->mirnas_ktr,
1);
// ktr = ktr - dSdt
cblas_daxpy (self->priv->targets->len,
-1,
self->priv->dSdt,
1,
self->priv->targets_ktr,
1);
// P = -(dSdt - ktr) / KDEGP
// P = dSdt; P = -ktr + P; P = -1/KDEGP * P
cblas_dcopy (self->priv->targets->len,
self->priv->dSdt,
1,
self->priv->P,
1);
cblas_daxpy (self->priv->targets->len,
-1,
self->priv->targets_ktr,
1,
self->priv->P,
1);
cblas_dscal (self->priv->targets->len,
-1.0 / KDEGP,
self->priv->P,
1);
}
}
else if (step_mode == MIRBOOKING_BROKER_STEP_MODE_INTEGRATE)
{
odeint_integrator_integrate (self->priv->integrator,
_compute_F,
self,
self->priv->t + step_size);
}
else
{
g_assert_not_reached ();
}
return TRUE;
}
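/*
 * Minimal usage sketch (illustrative only; 'broker' and 'error' are assumed
 * to be set up by the caller): iterate Newton-Raphson steps until the
 * error-to-tolerance ratio drops below 1.
 */
#if 0
gdouble error_ratio;
do
{
    if (!mirbooking_broker_step (broker,
                                 MIRBOOKING_BROKER_STEP_MODE_SOLVE_STEADY_STATE,
                                 1.0, /* apply the full Newton update */
                                 error))
        return FALSE;
    if (!mirbooking_broker_evaluate (broker, &error_ratio, error))
        return FALSE;
}
while (error_ratio > 1.0);
#endif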
/**
* mirbooking_broker_get_mirna_transcription_rate:
*
* Get the rate of transcription of the given #MirbookingMirna.
*
* This is resolved when stepping with %MIRBOOKING_BROKER_STEP_MODE_SOLVE_STEADY_STATE
* using the steady-state assumption.
*/
gdouble
mirbooking_broker_get_mirna_transcription_rate (MirbookingBroker *self,
MirbookingMirna *mirna)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->mirnas, mirna, (GEqualFunc) mirbooking_sequence_equal, &i), 0);
return self->priv->mirnas_ktr[i];
}
/**
* mirbooking_broker_set_mirna_transcription_rate:
*
* Set the rate of transcription of @mirna to @transcription_rate.
*/
void
mirbooking_broker_set_mirna_transcription_rate (MirbookingBroker *self, MirbookingMirna *mirna, gdouble transcription_rate)
{
g_return_if_fail (self->priv->init);
g_return_if_fail (transcription_rate >= 0);
guint i;
g_return_if_fail (g_ptr_array_find_with_equal_func (self->priv->mirnas, mirna, (GEqualFunc) mirbooking_sequence_equal, &i));
self->priv->mirnas_ktr[i] = transcription_rate;
}
/**
* mirbooking_broker_get_mirna_degradation_rate:
*
* Obtain the degradation rate of @mirna.
*/
gdouble
mirbooking_broker_get_mirna_degradation_rate (MirbookingBroker *self,
MirbookingMirna *mirna)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->mirnas, mirna, (GEqualFunc) mirbooking_sequence_equal, &i), 0.0);
return KDEGE * self->priv->E[i];
}
/**
* mirbooking_broker_get_target_transcription_rate:
*
* Get the rate of transcription of the given #MirbookingTarget.
*
* This is resolved when stepping with %MIRBOOKING_BROKER_STEP_MODE_SOLVE_STEADY_STATE
* using the steady-state assumption.
*/
gdouble
mirbooking_broker_get_target_transcription_rate (MirbookingBroker *self,
MirbookingTarget *target)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i), 0);
return self->priv->targets_ktr[i];
}
/**
* mirbooking_broker_get_target_degradation_rate:
*
* Get the rate of degradation of the given #MirbookingTarget.
*/
gdouble
mirbooking_broker_get_target_degradation_rate (MirbookingBroker *self,
MirbookingTarget *target)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i), 0);
return KDEGS * self->priv->S[i];
}
/**
* mirbooking_broker_set_target_transcription_rate:
*
* Set the rate of transcription of @target to @transcription_rate.
*/
void
mirbooking_broker_set_target_transcription_rate (MirbookingBroker *self, MirbookingTarget *target, gdouble transcription_rate)
{
g_return_if_fail (self->priv->init);
g_return_if_fail (transcription_rate >= 0);
guint i;
g_return_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i));
self->priv->targets_ktr[i] = transcription_rate;
}
/**
* mirbooking_broker_get_product_degradation_rate:
*
* Get the rate of product degradation.
*
* This is resolved when stepping with %MIRBOOKING_BROKER_STEP_MODE_SOLVE_STEADY_STATE
* using the steady-state assumption.
*/
gdouble
mirbooking_broker_get_product_degradation_rate (MirbookingBroker *self,
MirbookingTarget *target)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target, (GEqualFunc) mirbooking_sequence_equal, &i), 0);
return KDEGP * self->priv->P[i];
}
/**
* mirbooking_broker_get_target_sites:
*
* Obtain the computed #MirbookingTargetSite array by this #MirbookingBroker.
*
* Returns: (element-type MirbookingTargetSite) (transfer none): A view of the
* computed #MirbookingTargetSite
*/
const GArray *
mirbooking_broker_get_target_sites (MirbookingBroker *self)
{
g_return_val_if_fail (self != NULL, NULL);
g_return_val_if_fail (self->priv->init, NULL);
return self->priv->target_sites;
}
/**
* mirbooking_broker_get_target_site_quantity:
*
* Obtain the expected concentration of a #MirbookingTargetSite, which
* accounts for overlapping miRISC complexes in the neighborhood.
*/
gdouble
mirbooking_broker_get_target_site_quantity (MirbookingBroker *self, const MirbookingTargetSite *target_site)
{
g_return_val_if_fail (self->priv->init, 0.0);
gdouble St = mirbooking_broker_get_sequence_quantity (self, MIRBOOKING_SEQUENCE (target_site->target));
return _mirbooking_broker_get_target_site_vacancy (self,
target_site,
self->priv->prime5_footprint,
self->priv->prime3_footprint,
St,
self->priv->ES) * St;
}
/**
* mirbooking_broker_get_target_site_occupants_quantity:
*
* Obtain the total concentration of occupants occupying this target site
* either directly or indirectly via the footprint.
*/
gdouble
mirbooking_broker_get_target_site_occupants_quantity (MirbookingBroker *self,
const MirbookingTargetSite *target_site)
{
gdouble total_occupants_quantity = 0;
const MirbookingTargetSite *fts, *tts;
_mirbooking_broker_get_footprint_window (self,
target_site,
self->priv->prime3_footprint,
self->priv->prime5_footprint,
&fts, &tts);
const MirbookingTargetSite *ts;
for (ts = fts; ts <= tts; ts++)
{
total_occupants_quantity += _mirbooking_broker_get_target_site_occupants_quantity (self, ts, self->priv->ES);
}
return total_occupants_quantity;
}
/**
* mirbooking_broker_get_target_site_kother:
*
* Obtain the target site catalytic rate.
*/
gdouble
mirbooking_broker_get_target_site_kother (MirbookingBroker *self,
const MirbookingTargetSite *target_site)
{
g_return_val_if_fail (self->priv->init, 0.0);
guint i;
g_return_val_if_fail (g_ptr_array_find_with_equal_func (self->priv->targets, target_site->target, (GEqualFunc) mirbooking_sequence_equal, &i), 0.0);
return _compute_kother (self, i, target_site->position, self->priv->ES, self->priv->S[i]);
}
/**
* mirbooking_broker_get_mirnas:
*
* Returns: (element-type MirbookingMirna) (transfer none):
*/
const GPtrArray *
mirbooking_broker_get_mirnas (MirbookingBroker *self)
{
return self->priv->mirnas;
}
/**
* mirbooking_broker_get_targets:
*
* Returns: (element-type MirbookingTarget) (transfer none):
*/
const GPtrArray *
mirbooking_broker_get_targets (MirbookingBroker *self)
{
return self->priv->targets;
}
/**
* mirbooking_broker_get_occupants:
*
* This view is much faster to iterate over when the intent is to traverse
* all the complexes regardless of their actual location.
*
* Returns: (element-type MirbookingOccupant) (transfer none): A view over the
* occupants
*/
const GArray *
mirbooking_broker_get_occupants (MirbookingBroker *self)
{
g_return_val_if_fail (self->priv->init, NULL);
return self->priv->occupants;
}
/**
* mirbooking_broker_get_target_occupants_pmf:
* @target: The #MirbookingTarget for which we are retrieving the silencing
* @pmf_len: (out): Length of the PMF, which corresponds to the number
* of occupied sites plus one
*
* Compute the probability mass function of the number of occupied target sites
* on a given target by modeling them with a Poisson-Binomial distribution.
*
* Returns: (array length=pmf_len): The probability mass function of the number
* of bound miRISC complexes or %NULL if it cannot be computed
*/
gdouble *
mirbooking_broker_get_target_occupants_pmf (MirbookingBroker *self, MirbookingTarget *target, gsize *pmf_len)
{
g_return_val_if_fail (self->priv->init, NULL);
g_return_val_if_fail (g_hash_table_contains (self->priv->quantification, target), NULL);
gfloat target_quantity = mirbooking_broker_get_sequence_quantity (self, MIRBOOKING_SEQUENCE (target));
g_autoptr (GArray) probability_by_position = g_array_new (FALSE, FALSE, sizeof (gdouble));
MirbookingTargetSite *target_site = g_hash_table_lookup (self->priv->target_sites_by_target, target);
while (target_site < &g_array_index (self->priv->target_sites, MirbookingTargetSite, self->priv->target_sites->len) &&
target_site->target == target)
{
gdouble occupants_quantity = _mirbooking_broker_get_target_site_occupants_quantity (self, target_site, self->priv->ES);
if (occupants_quantity > 0)
{
gdouble p = occupants_quantity / target_quantity;
g_array_append_val (probability_by_position, p);
}
++target_site;
}
PoissonBinomial pb;
pb_init (&pb,
(gdouble*) probability_by_position->data,
probability_by_position->len);
gdouble* pmf = g_new (gdouble, 1 + probability_by_position->len);
memcpy (pmf, pb.pmf, (1 + probability_by_position->len) * sizeof (gdouble));
if (pmf_len)
*pmf_len = 1 + probability_by_position->len;
pb_destroy (&pb);
return pmf;
}
/**
* mirbooking_broker_get_target_expressed_fraction:
*
* Obtain the fraction of substrate @target that is expressed.
*/
gdouble
mirbooking_broker_get_target_expressed_fraction (MirbookingBroker *broker, MirbookingTarget *target)
{
gdouble ef = 0;
gdouble lambda = .27;
gsize pmf_len;
g_autofree gdouble *pmf = mirbooking_broker_get_target_occupants_pmf (broker, target, &pmf_len);
gsize i;
for (i = 0; i < pmf_len; i++)
{
ef += exp (-lambda * i) * pmf[i];
}
return ef;
}
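/*
 * Reading aid: the loop above computes the expectation of an exponential
 * silencing model over the occupancy PMF,
 *
 *     EF = sum_k exp(-lambda * k) * P(K = k),    lambda = 0.27
 *
 * so each additional bound miRISC scales the expressed fraction by a factor
 * of exp(-lambda).
 */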
static gboolean
write_output_to_tsv (MirbookingBroker *mirbooking,
GOutputStream *out,
GError **error)
{
g_autoptr (GDataOutputStream) output_f = g_data_output_stream_new (out);
gchar *header = "gene_accession\t"
"gene_name\t"
"target_accession\t"
"target_name\t"
"target_quantity\t"
"position\t"
"mirna_accession\t"
"mirna_name\t"
"mirna_quantity\t"
"score\t"
"quantity\n";
if (!g_data_output_stream_put_string (output_f, header, NULL, error))
{
return FALSE;
}
const GArray *target_sites = mirbooking_broker_get_target_sites (mirbooking);
gfloat target_quantity = 0;
const MirbookingTargetSite *target_site;
MirbookingTarget *cur_target = NULL;
for (target_site = &g_array_index (target_sites, MirbookingTargetSite, 0);
target_site < &g_array_index (target_sites, MirbookingTargetSite, target_sites->len);
target_site++)
{
// recompute each time the target changes
if (cur_target != target_site->target)
{
cur_target = target_site->target;
target_quantity = mirbooking_broker_get_sequence_quantity (mirbooking,
MIRBOOKING_SEQUENCE (target_site->target));
}
// report individual occupants
GSList *occupants;
for (occupants = target_site->occupants; occupants != NULL; occupants = occupants->next)
{
MirbookingOccupant *occupant = occupants->data;
g_autofree gchar* line = g_strdup_printf ("%s\t%s\t%s\t%s\t%e\t%lu\t%s\t%s\t%e\t%e\t%e\n",
COALESCE (mirbooking_sequence_get_gene_accession (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
COALESCE (mirbooking_sequence_get_gene_name (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (target_site->target)),
COALESCE (mirbooking_sequence_get_name (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
target_quantity,
target_site->position + 1, // 1-based
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (occupant->mirna)),
COALESCE (mirbooking_sequence_get_name (MIRBOOKING_SEQUENCE (occupant->mirna)), "N/A"),
mirbooking_broker_get_sequence_quantity (mirbooking, MIRBOOKING_SEQUENCE (occupant->mirna)) + mirbooking_broker_get_bound_mirna_quantity (mirbooking, occupant->mirna),
MIRBOOKING_SCORE_KM (occupant->score) + (mirbooking_broker_get_target_site_kother (mirbooking, target_site) / occupant->score.kf),
mirbooking_broker_get_occupant_quantity (mirbooking, occupant));
if (!g_data_output_stream_put_string (output_f, line, NULL, error))
{
return FALSE;
}
}
}
return TRUE;
}
static gboolean
write_output_to_tsv_detailed (MirbookingBroker *mirbooking,
GOutputStream *out,
GError **error)
{
g_autoptr (GDataOutputStream) output_f = g_data_output_stream_new (out);
gchar *header = "gene_accession\t"
"gene_name\t"
"target_accession\t"
"target_name\t"
"target_quantity\t"
"position\t"
"mirna_accession\t"
"mirna_name\t"
"mirna_quantity\t"
"kf\t"
"kr\t"
"kcleave\t"
"krelease\t"
"kcat\t"
"kother\t"
"kd\t"
"km\t"
"quantity\n";
if (!g_data_output_stream_put_string (output_f,
header,
NULL,
error))
{
return FALSE;
}
const GArray *target_sites = mirbooking_broker_get_target_sites (mirbooking);
gfloat target_quantity = 0;
const MirbookingTargetSite *target_site;
MirbookingTarget *cur_target = NULL;
for (target_site = &g_array_index (target_sites, MirbookingTargetSite, 0);
target_site < &g_array_index (target_sites, MirbookingTargetSite, target_sites->len);
target_site++)
{
// recompute each time the target changes
if (cur_target != target_site->target)
{
cur_target = target_site->target;
target_quantity = mirbooking_broker_get_sequence_quantity (mirbooking,
MIRBOOKING_SEQUENCE (target_site->target));
}
// report individual occupants
GSList *occupants;
for (occupants = target_site->occupants; occupants != NULL; occupants = occupants->next)
{
MirbookingOccupant *occupant = occupants->data;
g_autofree gchar *line = g_strdup_printf ("%s\t%s\t%s\t%s\t%e\t%lu\t%s\t%s\t%e\t%e\t%e\t%e\t%e\t%e\t%e\t%e\t%e\t%e\n",
COALESCE (mirbooking_sequence_get_gene_accession (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
COALESCE (mirbooking_sequence_get_gene_name (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (target_site->target)),
COALESCE (mirbooking_sequence_get_name (MIRBOOKING_SEQUENCE (target_site->target)), "N/A"),
target_quantity,
target_site->position + 1, // 1-based
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (occupant->mirna)),
COALESCE (mirbooking_sequence_get_name (MIRBOOKING_SEQUENCE (occupant->mirna)), "N/A"),
mirbooking_broker_get_sequence_quantity (mirbooking, MIRBOOKING_SEQUENCE (occupant->mirna)) + mirbooking_broker_get_bound_mirna_quantity (mirbooking, occupant->mirna),
occupant->score.kf,
occupant->score.kr,
occupant->score.kcleave,
occupant->score.krelease,
occupant->score.kcat,
mirbooking_broker_get_target_site_kother (mirbooking, target_site),
MIRBOOKING_SCORE_KD (occupant->score),
MIRBOOKING_SCORE_KM (occupant->score) + (mirbooking_broker_get_target_site_kother (mirbooking, target_site) / occupant->score.kf),
mirbooking_broker_get_occupant_quantity (mirbooking, occupant));
if (!g_data_output_stream_put_string (output_f, line, NULL, error))
{
return FALSE;
}
}
}
return TRUE;
}
static gboolean
write_output_to_gff3 (MirbookingBroker *mirbooking, GOutputStream *out, GError **error)
{
g_autoptr (GDataOutputStream) output_f = g_data_output_stream_new (out);
if (!g_data_output_stream_put_string (output_f, "##gff-version 3\n", NULL, error))
{
return FALSE;
}
const GArray *target_sites = mirbooking_broker_get_target_sites (mirbooking);
gint i = 1;
const MirbookingTargetSite *target_site;
for (target_site = &g_array_index (target_sites, MirbookingTargetSite, 0);
target_site < &g_array_index (target_sites, MirbookingTargetSite, target_sites->len);
target_site++)
{
// report individual occupants
GSList *occupants;
for (occupants = target_site->occupants; occupants != NULL; occupants = occupants->next)
{
// the sequence ontology for 'miRNA_target_site' is 'SO:0000934'
MirbookingOccupant *occupant = occupants->data;
g_autofree gchar *line = g_strdup_printf ("%s\tmiRBooking\tmiRNA_target_site\t%lu\t%lu\t%e\t.\t.\tID=%d;Name=%s;Alias=%s\n",
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (target_site->target)),
(gsize) MAX (1, (gssize) target_site->position + 1 - (gssize) mirbooking->priv->prime5_footprint),
MIN (mirbooking_sequence_get_sequence_length (MIRBOOKING_SEQUENCE (target_site->target)), target_site->position + 1 + mirbooking->priv->prime3_footprint),
mirbooking_broker_get_occupant_quantity (mirbooking, occupant),
i++,
mirbooking_sequence_get_name (MIRBOOKING_SEQUENCE (occupant->mirna)),
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (occupant->mirna)));
if (!g_data_output_stream_put_string (output_f,
line,
NULL,
error))
{
return FALSE;
}
}
}
return TRUE;
}
static gboolean
write_output_to_wiggle (MirbookingBroker *broker, GOutputStream *out, GError **error)
{
g_autoptr (GDataOutputStream) output_f = g_data_output_stream_new (out);
const GArray *target_sites = mirbooking_broker_get_target_sites (broker);
if (!g_data_output_stream_put_string (output_f, "track type=wiggle_0\n", NULL, error))
{
return FALSE;
}
MirbookingTarget *target = NULL;
const MirbookingTargetSite *target_site;
for (target_site = &g_array_index (target_sites, MirbookingTargetSite, 0);
target_site < &g_array_index (target_sites, MirbookingTargetSite, target_sites->len);
target_site++)
{
if (target != target_site->target)
{
g_autofree gchar *line = g_strdup_printf ("variableStep chrom=%s\n",
mirbooking_sequence_get_accession (MIRBOOKING_SEQUENCE (target_site->target)));
if (!g_data_output_stream_put_string (output_f,
line,
NULL,
error))
{
return FALSE;
}
target = target_site->target;
}
gdouble St = mirbooking_broker_get_sequence_quantity (broker, MIRBOOKING_SEQUENCE (target_site->target));
gdouble Stp = mirbooking_broker_get_target_site_occupants_quantity (broker, target_site);
// only report positions with activity
if (Stp > 0)
{
g_autofree gchar *line = g_strdup_printf ("%lu %f\n",
target_site->position + 1,
Stp / St);
if (!g_data_output_stream_put_string (output_f,
line,
NULL,
error))
{
return FALSE;
}
}
}
return TRUE;
}
typedef struct _MirbookingBrokerOutputFormatMeta
{
MirbookingBrokerOutputFormat output_format;
const gchar *nick;
gboolean (*write) (MirbookingBroker *broker, GOutputStream *output, GError **error);
} MirbookingBrokerOutputFormatMeta;
static const MirbookingBrokerOutputFormatMeta OUTPUT_FORMAT_META[] =
{
{MIRBOOKING_BROKER_OUTPUT_FORMAT_TSV, "tsv", write_output_to_tsv},
{MIRBOOKING_BROKER_OUTPUT_FORMAT_TSV_DETAILED, "tsv-detailed", write_output_to_tsv_detailed},
{MIRBOOKING_BROKER_OUTPUT_FORMAT_GFF3, "gff3", write_output_to_gff3},
{MIRBOOKING_BROKER_OUTPUT_FORMAT_WIG, "wig", write_output_to_wiggle}
};
gboolean
mirbooking_broker_write_output_to_stream (MirbookingBroker *self,
GOutputStream *out,
MirbookingBrokerOutputFormat output_format,
GError **error)
{
g_return_val_if_fail (self->priv->init, FALSE);
gsize i;
for (i = 0; i < G_N_ELEMENTS (OUTPUT_FORMAT_META); i++)
{
if (OUTPUT_FORMAT_META[i].output_format == output_format)
{
return OUTPUT_FORMAT_META[i].write (self,
out,
error);
}
}
g_return_val_if_reached (FALSE);
}
gboolean
mirbooking_broker_write_output_to_file (MirbookingBroker *self,
GFile *output_file,
MirbookingBrokerOutputFormat output_format,
GError **error)
{
g_autoptr (GOutputStream) out = G_OUTPUT_STREAM (g_file_replace (output_file,
NULL,
FALSE,
G_FILE_CREATE_NONE,
NULL,
error));
g_return_val_if_fail (out != NULL, FALSE);
return mirbooking_broker_write_output_to_stream (self,
out,
output_format,
error);
}
|
LAGraph_BF_full2.c | //------------------------------------------------------------------------------
// LAGraph_BF_full2.c: Bellman-Ford single-source shortest paths, returns tree,
// and the diagonal of the input matrix A need not be an explicit 0, using the
// frontier idea from Roi Lipman
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_BF_full2: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full2 performs Bellman-Ford to find the shortest paths, the
// parent nodes along each path, and the hops (number of edges) in each path
// from a given source vertex s in the range [0, n) on a graph given as an
// n-by-n matrix A. The sparse matrix A has an entry A(i, j) = w if there is
// an edge from vertex i to vertex j with weight w.
// TODO: think about the return values
// LAGraph_BF_full2 returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of k-th node in the shortest path. In particular,
// pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest
// path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the
// GrB_Vectors d, pi and h (i.e., *pd_output, *ppi_output and *ph_output
// respectively) are set to NULL.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned, if these errors are found by the underlying
// GrB_* functions.
//------------------------------------------------------------------------------
#define LAGraph_FREE_WORK \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&dfrontier); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGraph_Free ((void**)&I); \
LAGraph_Free ((void**)&J); \
LAGraph_Free ((void**)&w); \
LAGraph_Free ((void**)&W); \
LAGraph_Free ((void**)&h); \
LAGraph_Free ((void**)&pi); \
}
#define LAGraph_FREE_ALL \
{ \
LAGraph_FREE_WORK \
GrB_free (pd_output); \
GrB_free (ppi_output); \
GrB_free (ph_output); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacency matrix A and "distance" vector d;
// <INFINITY, UINT64_MAX, UINT64_MAX> corresponds to nonexistence of a path,
// and the value <0, 0, 0> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF2_Tuple3_struct;
//------------------------------------------------------------------------------
// 3 functions: two binary ops z=f(x,y), Tuple3 x Tuple3 -> Tuple3 (lMIN,
// PLUSrhs), and one equality test, Tuple3 x Tuple3 -> bool (EQ)
//------------------------------------------------------------------------------
void BF2_lMIN2
(
BF2_Tuple3_struct *z,
const BF2_Tuple3_struct *x,
const BF2_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
void BF2_PLUSrhs2
(
BF2_Tuple3_struct *z,
const BF2_Tuple3_struct *x,
const BF2_Tuple3_struct *y
)
{
z->w = x->w + y->w;
z->h = x->h + y->h;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi;
}
else
{
z->pi = x->pi;
}
}
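//------------------------------------------------------------------------------
// Worked example of the (lMIN, PLUSrhs) pair (a reading aid): combining a
// distance d(u) = <2.0, 1, 3> with an edge A(u,v) = <5.0, 1, u+1> via PLUSrhs
// yields <7.0, 2, u+1>: the weights and hops add, and the candidate parent of
// v becomes u. lMIN then keeps, among all candidates reaching v, the entry
// with the smallest weight, breaking ties by hops and then by parent index.
//------------------------------------------------------------------------------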
void BF2_EQ
(
bool *z,
const BF2_Tuple3_struct *x,
const BF2_Tuple3_struct *y
)
{
if (x->w == y->w && x->h == y->h && x->pi == y->pi)
{
*z = true;
}
else
{
*z = false;
}
}
// Given an n-by-n adjacency matrix A and a source vertex s, if there is no
// negative-weight cycle reachable from s, return the distances of the
// shortest paths from s, and the parents along the paths, as vector d.
// Otherwise, return d=NULL if there is a negative-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full2
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dtmp = NULL, dfrontier = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_BinaryOp BF_EQ_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF2_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGRAPH_OK (GrB_Matrix_nrows (&nrows, A)) ;
LAGRAPH_OK (GrB_Matrix_ncols (&ncols, A)) ;
LAGRAPH_OK (GrB_Matrix_nvals (&nz, A));
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n) // s is an unsigned GrB_Index, so only the upper bound is checked
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF2_Tuple3_struct)));
// GrB_BinaryOp
LAGRAPH_OK (GrB_BinaryOp_new(&BF_EQ_Tuple3,
(LAGraph_binary_function) (&BF2_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF2_lMIN2),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF2_PLUSrhs2),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
// GrB_Monoid
BF2_Tuple3_struct BF_identity = (BF2_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGRAPH_OK (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_Malloc (nz, sizeof(double)) ;
W = LAGraph_Malloc (nz, sizeof(BF2_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, whose entries are converted to BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads;
LAGRAPH_OK( LAGraph_GetNumThreads (&nthreads, NULL)) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
if (w[k] == 0) //diagonal entries
{
W[k] = (BF2_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
}
else
{
W[k] = (BF2_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
}
LAGRAPH_OK (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGraph_Free ((void**)&I);
LAGraph_Free ((void**)&J);
LAGraph_Free ((void**)&W);
LAGraph_Free ((void**)&w);
//--------------------------------------------------------------------------
// create and initialize "distance" vector d
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Vector_new(&d, BF_Tuple3, n));
// initial distance from s to itself
BF2_Tuple3_struct d0 = (BF2_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
// copy d to dtmp to create a vector of the same size
LAGRAPH_OK (GrB_Vector_dup(&dtmp, d));
LAGRAPH_OK (GrB_Vector_dup(&dfrontier, d));
bool same = false; // true when d == dtmp
int64_t iter = 0; // number of iterations
// terminate when no new path is found or after V-1 loops
while (!same && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
LAGRAPH_OK (GrB_vxm(dfrontier, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL));
// dtmp[i] = min(d[i], dfrontier[i]).
LAGRAPH_OK (GrB_Vector_eWiseAdd_BinaryOp(dtmp, GrB_NULL, GrB_NULL,
BF_lMIN_Tuple3, d, dfrontier, GrB_NULL));
LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
if (!same)
{
GrB_Vector ttmp = dtmp;
dtmp = d;
d = ttmp;
}
iter++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (!same)
{
// execute semiring again to check for negative-weight cycle
LAGRAPH_OK (GrB_vxm(dfrontier, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL));
// dtmp[i] = min(d[i], dfrontier[i]).
LAGRAPH_OK (GrB_Vector_eWiseAdd_BinaryOp(dtmp, GrB_NULL, GrB_NULL,
BF_lMIN_Tuple3, d, dfrontier, GrB_NULL));
// if d != dtmp, then there is a negative-weight cycle in the graph
LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
if (!same)
{
// printf("A negative-weight cycle found. \n");
LAGraph_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_Malloc (n, sizeof(BF2_Tuple3_struct)) ;
w = LAGraph_Malloc (n, sizeof(double)) ;
h = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
nz = n; // capacity of the output arrays; set to nvals(d) on return
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
for (GrB_Index k = 0; k < nz; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGRAPH_OK (GrB_Vector_new(pd_output, GrB_FP64, n));
LAGRAPH_OK (GrB_Vector_new(ppi_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_new(ph_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_build (*pd_output , I, w , nz,GrB_MIN_FP64 ));
LAGRAPH_OK (GrB_Vector_build (*ppi_output, I, pi, nz,GrB_MIN_UINT64));
LAGRAPH_OK (GrB_Vector_build (*ph_output , I, h , nz,GrB_MIN_UINT64));
LAGraph_FREE_WORK;
return (GrB_SUCCESS) ;
}
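//------------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the library): run the solver
// from source vertex 0 on an existing n-by-n GrB_Matrix A and print the
// distances of reachable vertices. A and n are assumed to come from the
// caller.
//------------------------------------------------------------------------------
#if 0
GrB_Vector d = NULL, pi = NULL, h = NULL ;
GrB_Info info = LAGraph_BF_full2 (&d, &pi, &h, A, 0) ;
if (info == GrB_NO_VALUE)
{
    printf ("negative-weight cycle reachable from the source\n") ;
}
else if (info == GrB_SUCCESS)
{
    for (GrB_Index k = 0 ; k < n ; k++)
    {
        double dist ;
        // absent entries return GrB_NO_VALUE, i.e., the vertex is unreachable
        if (GrB_Vector_extractElement_FP64 (&dist, d, k) == GrB_SUCCESS)
        {
            printf ("d(%lu) = %g\n", (unsigned long) k, dist) ;
        }
    }
}
GrB_free (&d) ; GrB_free (&pi) ; GrB_free (&h) ;
#endif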
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
struct softmax_fwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float a, AType b) {
return AType(expf(a)/b);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double a, AType b) {
return AType(exp(a)/b);
}
};
struct log_softmax_fwd {
template<typename DType>
MSHADOW_XINLINE static float Map(DType a, float b) {
return a - logf(b);
}
template<typename DType>
MSHADOW_XINLINE static double Map(DType a, double b) {
return a - log(b);
}
};
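// Reading aid: with m = max_j x_j and temperature T, the forward functors
// above implement the numerically stable formulations
//   softmax_i     = exp((x_i - m)/T) / sum_j exp((x_j - m)/T)
//   log_softmax_i = (x_i - m)/T - log(sum_j exp((x_j - m)/T))
// The max subtraction, performed by the Softmax kernels below, avoids
// overflow in exp without changing the result.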
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length == nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < M; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t len = static_cast<index_t>(length[i]);
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < len; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
for (index_t j = len; j < M; ++j) {
out[base + j*sa] = OType(0.0f);
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
}
}
struct softmax_bwd {
template<typename DType, typename AType>
MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
return AType(out * (ograd - sum));
}
};
struct log_softmax_bwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
return AType(ograd - expf(out)*sum);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
return AType(ograd - exp(out)*sum);
}
};
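// Reading aid: with g = ograd, y = out and s the row reduction computed via
// the caller-supplied OP1 in SoftmaxGrad (typically s = sum_j g_j * y_j for
// softmax and s = sum_j g_j for log_softmax), the backward functors implement
//   softmax:     dL/dx_i = y_i * (g_i - s)
//   log_softmax: dL/dx_i = g_i - exp(y_i) * s
// and the callers divide the result by the temperature when T != 1.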
template<typename OP1, typename OP2, int Req, bool negate,
typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape,
int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length != nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
index_t len = static_cast<index_t>(length[i]);
AType sum = AType(0);
for (index_t j = 0; j < len; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
AType sum = AType(0);
for (index_t j = 0; j < M; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
}
}
#ifdef __CUDACC__
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
index_t M, int axis, Shape<ndim> sshape,
Shape<ndim> stride, const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
red::maximum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::maximum, x_bits>(smem);
__syncthreads();
DType smax = smem[0];
__syncthreads();
red::sum::SetInitValue(smem[x]);
DType val;
for (index_t i = x; i < len; i += x_size) {
val = negate ? -in[base + i*sa]:in[base + i*sa];
smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
for (index_t i = x; i < M; i += x_size) {
val = negate ? -in[base + i*sa] : in[base + i*sa];
out[base + i*sa] =
(i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
}
}
const int softmax_threads_per_block = 512;
template<typename OP, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
const index_t M, const double temperature,
const int rows_per_block, const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const LType* in_aligned = reinterpret_cast<const LType*>(in);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
__syncthreads();
DType my_max_value;
red::maximum::SetInitValue(my_max_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
}
scratch[threadIdx.x] = my_max_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return ::max(x, y); });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
AType my_sum;
red::sum::SetInitValue(my_sum);
for (index_t i = my_id; i < len; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
scratch[threadIdx.x] = my_sum;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] += scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y;});
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
DType(0.0f);
}
__syncthreads();
LType* out_aligned = reinterpret_cast<LType*>(out);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
}
}
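// Reading aid for the reduction pattern above (also used by the gradient
// kernel below): each row's thread group first tree-reduces its partial
// values in shared memory down to warp_size slots, a single warp then
// finishes with warp_reduce, and every thread of the group reads the result
// back from the group's first scratch slot.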
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_compute_kernel<OP, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
} else {
softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
}
}
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
DType *igrad, const IType *length,
const index_t M,
const double temperature,
const int rows_per_block,
const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const LType* out_aligned = reinterpret_cast<const LType*>(out);
const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
__syncthreads();
AType my_sum_value;
red::sum::SetInitValue(my_sum_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_sum_value += OP1::Map(row[i + M], row[i]);
}
scratch[threadIdx.x] = my_sum_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y; });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val =
negate ?
-OP2::Map(row[i + M], row[i], ssum) :
OP2::Map(row[i + M], row[i], ssum);
row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
DType(0.0f);
if (Req == kAddTo) {
row[i] += igrad[my_row * M + i];
}
}
__syncthreads();
LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
}
}
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
const IType *length, index_t M, int axis,
Shape<ndim> sshape, Shape<ndim> stride,
const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
red::sum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
DType final_result;
for (index_t i = x; i < M; i += x_size) {
final_result =
negate ?
-OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
final_result = (i < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
}
}
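// Launcher: the persistent-storage kernel is used only when the softmax axis
// is contiguous (stride[axis] == 1), the row fits into the 20 kB shared-memory
// budget, and DType == OType so that out and ograd can be staged through the
// same LType loads; all other cases fall back to the generic kernel above.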
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape, int axis,
const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// The optimized kernel uses 20 kB of shared memory for persistent storage.
// Both out and ograd have to be staged there, so the maximum supported M is
// half of what the forward pass allows.
const size_t max_opt_M = 20 * 1024 / DSize / 2;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
} else {
softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
}
}
#endif
} // namespace mxnet_op
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;
dmlc::optional<double> temperature;
dmlc::optional<int> dtype;
dmlc::optional<bool> use_length;
DMLC_DECLARE_PARAMETER(SoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1)
.describe("The axis along which to compute softmax.");
DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
.describe("Temperature parameter in softmax");
DMLC_DECLARE_FIELD(dtype)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(dmlc::optional<int>())
.describe("DType of the output in case this can't be inferred. "
"Defaults to the same as input's dtype if not defined (dtype=None).");
DMLC_DECLARE_FIELD(use_length)
.set_default(dmlc::optional<bool>(false))
.describe("Whether to use the length input as a mask over the data input.");
}
};
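// Note on the temperature parameter above: it divides the logits before
// normalization, i.e. softmax_T(x)_i = exp(x_i / T) / sum_j exp(x_j / T).
// For example (values rounded), softmax([1, 2, 3]) with T = 1 is about
// [0.09, 0.24, 0.67]; larger T flattens the distribution toward uniform,
// while T < 1 sharpens it.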
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.dtype.has_value() && param.dtype.value() != -1;
}
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.use_length.value();
}
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
return true;
} else {
std::vector<int> tmp = {in_attrs->at(0)};
return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
}
}
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(out_attrs->size(), 1U);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
if (param.use_length.value()) {
mxnet::TShape& dshape = in_attrs->at(0);
mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
int j = 0;
int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i != axis) {
tmp_shape[j++] = dshape[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
}
mxnet::ShapeVector tmp = {in_attrs->at(0)};
return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
mxnet::ShapeVector dgrad = {out_attrs->at(0)};
bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
mxnet::ShapeVector length = {in_attrs->at(2)};
mxnet::ShapeVector lgrad = {out_attrs->at(1)};
res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
return res;
} else {
return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
}
} else {
return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
}
}
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
int in_dtype = (*in_attrs)[1];
int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
if (softmax_use_length(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
}
// (*out_attrs)[1] exists only when use_length is set (out_attrs has a
// single entry otherwise), so guard the extra checks accordingly.
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
(!softmax_use_length(attrs) ||
((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1));
} else {
CHECK_EQ(in_attrs->size(), 2U);
int out_dtype = (*in_attrs)[1];
TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
}
}
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
}
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
}
}
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
return softmax_use_length(attrs) ? 4 : 3;
}
return 2;
}
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::string>{"ograd", "data", "length", "output"};
} else {
return std::vector<std::string>{"ograd", "data", "output"};
}
} else {
return std::vector<std::string>{"ograd", "output"};
}
}
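// Gradient node builder: when an output dtype override or a length mask is
// in play, the backward pass needs the forward inputs as well as the output
// (ElemwiseGradUseInOut); otherwise the softmax gradient can be computed from
// the output alone, so ElemwiseGradUseOut suffices.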
struct SoftmaxFGradient {
const char *op_name;
std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
const std::vector<nnvm::NodeEntry>& ograds) const {
if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
return ElemwiseGradUseInOut {op_name}(n, ograds);
} else {
return ElemwiseGradUseOut {op_name}(n, ograds);
}
}
};
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
"See https://mxnet.incubator.apache.org/versions/master/faq/env_var.html "
"for more details.");
}
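// With MXNET_SAFE_ACCUMULATION=1 the row reductions are accumulated in AType
// (a wider type, e.g. float32 for float16 data); otherwise accumulation
// happens in DType itself. The branches below differ only in that template
// argument.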
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
int type = kInt32;
if (param.use_length.value()) {
CHECK(inputs.size() > 1)
<< "Mask needs to be provided when using softmax with use_length=True.";
type = inputs[1].type_flag_;
}
MXNET_INT_TYPE_SWITCH(type, IType, {
IType* mask_ptr = nullptr;
if (param.use_length.value()) {
mask_ptr = inputs[1].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
}
});
});
});
}
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (softmax_use_length(attrs)) {
MXNET_INT_TYPE_SWITCH(inputs[2].type_flag_, IType, {
if (req[1] != kNullOp) {
mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
}
});
}
if (req[0] == kNullOp) return;
const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
out_idx = softmax_use_length(attrs) ? 3 : out_idx;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(itype, IType, {
IType * length_ptr = nullptr;
if (softmax_use_length(attrs)) {
length_ptr = inputs[2].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
}
});
});
});
});
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
DynamicGraph.h | /*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DYNAMICGRAPH_H
#define DYNAMICGRAPH_H
#include "../DataStructures/DeallocatingVector.h"
#include <boost/assert.hpp>
#include <boost/range/irange.hpp>
#include <cstdint>
#include <algorithm>
#include <limits>
#include <vector>
template <typename EdgeDataT> class DynamicGraph
{
public:
typedef decltype(boost::irange(0u,0u)) EdgeRange;
typedef EdgeDataT EdgeData;
typedef unsigned NodeIterator;
typedef unsigned EdgeIterator;
class InputEdge
{
public:
NodeIterator source;
NodeIterator target;
EdgeDataT data;
bool operator<(const InputEdge &right) const
{
if (source != right.source)
return source < right.source;
return target < right.target;
}
};
// Constructs an empty graph with a given number of nodes.
explicit DynamicGraph(int32_t nodes) : m_numNodes(nodes), m_numEdges(0)
{
m_nodes.reserve(m_numNodes);
m_nodes.resize(m_numNodes);
m_edges.reserve(m_numNodes * 1.1);
m_edges.resize(m_numNodes);
}
template <class ContainerT> DynamicGraph(const int32_t nodes, const ContainerT &graph)
{
m_numNodes = nodes;
m_numEdges = (EdgeIterator)graph.size();
m_nodes.reserve(m_numNodes + 1);
m_nodes.resize(m_numNodes + 1);
EdgeIterator edge = 0;
EdgeIterator position = 0;
for (NodeIterator node = 0; node < m_numNodes; ++node)
{
EdgeIterator lastEdge = edge;
while (edge < m_numEdges && graph[edge].source == node)
{
++edge;
}
m_nodes[node].firstEdge = position;
m_nodes[node].edges = edge - lastEdge;
position += m_nodes[node].edges;
}
m_nodes.back().firstEdge = position;
m_edges.reserve(position * 1.1);
m_edges.resize(position);
edge = 0;
for (NodeIterator node = 0; node < m_numNodes; ++node)
{
for (EdgeIterator i = m_nodes[node].firstEdge,
e = m_nodes[node].firstEdge + m_nodes[node].edges;
i != e;
++i)
{
m_edges[i].target = graph[edge].target;
m_edges[i].data = graph[edge].data;
BOOST_ASSERT_MSG(graph[edge].data.distance > 0, "edge distance invalid");
++edge;
}
}
}
~DynamicGraph() {}
unsigned GetNumberOfNodes() const { return m_numNodes; }
unsigned GetNumberOfEdges() const { return m_numEdges; }
unsigned GetOutDegree(const NodeIterator n) const { return m_nodes[n].edges; }
unsigned GetDirectedOutDegree(const NodeIterator n) const
{
unsigned degree = 0;
for(EdgeIterator edge = BeginEdges(n); edge < EndEdges(n); ++edge)
{
if (GetEdgeData(edge).forward)
{
++degree;
}
}
return degree;
}
NodeIterator GetTarget(const EdgeIterator e) const { return NodeIterator(m_edges[e].target); }
void SetTarget(const EdgeIterator e, const NodeIterator n) { m_edges[e].target = n; }
EdgeDataT &GetEdgeData(const EdgeIterator e) { return m_edges[e].data; }
const EdgeDataT &GetEdgeData(const EdgeIterator e) const { return m_edges[e].data; }
EdgeIterator BeginEdges(const NodeIterator n) const
{
return EdgeIterator(m_nodes[n].firstEdge);
}
EdgeIterator EndEdges(const NodeIterator n) const
{
return EdgeIterator(m_nodes[n].firstEdge + m_nodes[n].edges);
}
EdgeRange GetAdjacentEdgeRange(const NodeIterator node) const
{
return boost::irange(BeginEdges(node), EndEdges(node));
}
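// Typical traversal sketch using the range above:
// for (const EdgeIterator e : graph.GetAdjacentEdgeRange(node))
// visit(graph.GetTarget(e), graph.GetEdgeData(e));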
// adds an edge. Invalidates edge iterators for the source node
EdgeIterator InsertEdge(const NodeIterator from, const NodeIterator to, const EdgeDataT &data)
{
Node &node = m_nodes[from];
EdgeIterator newFirstEdge = node.edges + node.firstEdge;
if (newFirstEdge >= m_edges.size() || !isDummy(newFirstEdge))
{
if (node.firstEdge != 0 && isDummy(node.firstEdge - 1))
{
node.firstEdge--;
m_edges[node.firstEdge] = m_edges[node.firstEdge + node.edges];
}
else
{
EdgeIterator newFirstEdge = (EdgeIterator)m_edges.size();
unsigned newSize = node.edges * 1.1 + 2;
EdgeIterator requiredCapacity = newSize + m_edges.size();
EdgeIterator oldCapacity = m_edges.capacity();
if (requiredCapacity >= oldCapacity)
{
m_edges.reserve(requiredCapacity * 1.1);
}
m_edges.resize(m_edges.size() + newSize);
for (EdgeIterator i = 0; i < node.edges; ++i)
{
m_edges[newFirstEdge + i] = m_edges[node.firstEdge + i];
makeDummy(node.firstEdge + i);
}
for (EdgeIterator i = node.edges + 1; i < newSize; ++i)
makeDummy(newFirstEdge + i);
node.firstEdge = newFirstEdge;
}
}
Edge &edge = m_edges[node.firstEdge + node.edges];
edge.target = to;
edge.data = data;
++m_numEdges;
++node.edges;
return EdgeIterator(node.firstEdge + node.edges);
}
// removes an edge. Invalidates edge iterators for the source node
void DeleteEdge(const NodeIterator source, const EdgeIterator e)
{
Node &node = m_nodes[source];
#pragma omp atomic
--m_numEdges;
--node.edges;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != node.edges);
const unsigned last = node.firstEdge + node.edges;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != last);
// swap with last edge
m_edges[e] = m_edges[last];
makeDummy(last);
}
// removes all edges (source,target)
int32_t DeleteEdgesTo(const NodeIterator source, const NodeIterator target)
{
int32_t deleted = 0;
for (EdgeIterator i = BeginEdges(source), iend = EndEdges(source); i < iend - deleted; ++i)
{
if (m_edges[i].target == target)
{
do
{
deleted++;
m_edges[i] = m_edges[iend - deleted];
makeDummy(iend - deleted);
} while (i < iend - deleted && m_edges[i].target == target);
}
}
#pragma omp atomic
m_numEdges -= deleted;
m_nodes[source].edges -= deleted;
return deleted;
}
// searches for a specific edge
EdgeIterator FindEdge(const NodeIterator from, const NodeIterator to) const
{
for (EdgeIterator i = BeginEdges(from), iend = EndEdges(from); i != iend; ++i)
{
if (to == m_edges[i].target)
{
return i;
}
}
return EndEdges(from);
}
protected:
bool isDummy(const EdgeIterator edge) const
{
return m_edges[edge].target == (std::numeric_limits<NodeIterator>::max)();
}
void makeDummy(const EdgeIterator edge)
{
m_edges[edge].target = (std::numeric_limits<NodeIterator>::max)();
}
struct Node
{
// index of the first edge
EdgeIterator firstEdge;
// number of edges
unsigned edges;
};
struct Edge
{
NodeIterator target;
EdgeDataT data;
};
NodeIterator m_numNodes;
EdgeIterator m_numEdges;
std::vector<Node> m_nodes;
DeallocatingVector<Edge> m_edges;
};
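// Minimal usage sketch (the EdgeData type and its members are illustrative,
// not part of this header):
// struct EdgeData { int distance; bool forward; };
// std::vector<DynamicGraph<EdgeData>::InputEdge> edges = ...;
// std::sort(edges.begin(), edges.end()); // bulk constructor expects source-sorted input
// DynamicGraph<EdgeData> graph(node_count, edges);
// auto e = graph.InsertEdge(0, 1, EdgeData{42, true});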
#endif // DYNAMICGRAPH_H
|
detector.c | #include <dirent.h>
#include "darknet.h"
#include "image.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network*));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
if(gpu_index >= 0) {
cuda_set_device(gpus[i]);
}
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
#ifndef BENCHMARK
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
#endif
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
#ifdef LOSS_ONLY
double time=what_time_is_it_now();
#else
double time;
#endif
int count = 0;
char buff[1024];
sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base);
save_weights(net, buff);
int max_size = ((net->w + net->h)/2);
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("Resizing\n");
#endif
int dim = max_size - ((rand() % 8) * 32);
#ifdef BENCHMARK
dim = 608;
#endif
if (get_current_batch(net)+200 > net->max_batches) dim = max_size;
if (net->w < dim || net->h < dim) dim = max_size;
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("%d\n", dim);
#endif
args.w = dim;
args.h = dim;
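// Discard the batch that is currently being loaded at the old dimensions and
// restart loading with the new width/height before resizing the networks.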
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
#ifndef LOSS_ONLY
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
#endif
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
float loss = 0;
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus == 1) {
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
}
else {
loss = train_network(net, train);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
#ifdef LOSS_ONLY
printf("%lf\t%f\n", what_time_is_it_now()-time, loss);
#else
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
#endif
if(i%100==0){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
#ifdef GPU_STATS
cuda_dump_mem_stat();
#endif
#ifdef BENCHMARK
break;
#endif
}
#ifdef GPU
if(gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
free(paths);
free(plist);
free(base);
free(nets);
free(options);
}
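/* Illustrative invocation (paths and weight files are examples, not requirements):
./darknet detector train cfg/coco.data cfg/yolov3.cfg darknet53.conv.74 -gpus 0,1
*/
// Extracts the numeric COCO image id from a filename such as
// "COCO_val2014_000000123456.jpg" (the digits after the last '_' or '/').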
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
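// Writes detections in the PASCAL VOC results format, one file per class:
// "<image id> <confidence> <xmin> <ymin> <xmax> <ymax>" with 1-based,
// image-clamped pixel coordinates.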
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
// list *plist = get_paths("data/coco_val_5k.list");
list *options = read_data_cfg(datacfg);
char *test_images = option_find_str(options, "test", "data/test.list");
list *plist = get_paths(test_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
int resize = im.w != net->w || im.h != net->h;
image sized = resize ? letterbox_image(im, net->w, net->h) : im;
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
float *X = sized.data;
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window_cv("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
if (resize) free_image(sized);
if (filename) break;
}
}
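/* Illustrative invocation (file names are examples):
./darknet detector test cfg/coco.data cfg/yolov3.cfg yolov3.weights data/dog.jpg -thresh 0.5
*/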
int exists(const char *fname, const char* ext)
{
FILE *file;
if (strstr(fname, ext) && (file = fopen(fname, "r")))
{
fclose(file);
return 1;
}
return 0;
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
FG_PersistentHomology_template.h | #ifndef FG_PERSISTENTHOMOLOGY_TEMPLATE_H
#define FG_PERSISTENTHOMOLOGY_TEMPLATE_H
#include "FG_PersistentHomology.h"
using namespace std;
using namespace ttk;
template <class dataType> void ttk::FG_PersistentHomology::computeIndexing(double& maxF, double& minF){
dataType* field = (dataType*)inputData_;
// seed both extrema from the first field value so all-negative fields work
maxF = double(field[0]);
minF = double(field[0]);
// building the indexing
int vertices = triangulation_->getNumberOfVertices();
vector<pair<dataType,SimplexId> > thepairs(vertices);
for(int i=0; i<vertices; i++){
maxF = std::max(maxF, (double)field[i]);
minF = std::min(minF, (double)field[i]);
thepairs[i] = pair<dataType,SimplexId>(field[i],i);
}
sort(thepairs.begin(), thepairs.end());
indexing_ = new vector<SimplexId>(vertices);
for(SimplexId i=0; i<vertices; i++){
(*indexing_)[thepairs[i].second]=i;
}
computeGradient(indexing_);
}
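// The indexing built above orders vertices by increasing scalar value, giving
// an injective function that fixes the filtration order; computeGradient()
// presumably consumes it to build the discrete (Forman) gradient whose
// critical simplices populate criticalSimplices below.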
template <class dataType> void ttk::FG_PersistentHomology::computeBoundayMatrix(){
BoundaryMatrix<dataType>* bdmatrix = new BoundaryMatrix<dataType>();
map<Simplex, int> simplex_to_index;
int global_index=0;
for(int d=0; d <= dimensionality_; d++){
for(auto i : criticalSimplices[d]){
Simplex simplex = Simplex(d,i);
simplex_to_index[simplex] = global_index++; //assign the next global index to this critical simplex
}
}
for(int d=0; d <= dimensionality_; d++){
vector<SimplexId> criticals(criticalSimplices[d].begin(),criticalSimplices[d].end());
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for shared(bdmatrix,criticals)
#endif
for(int i=0; i<criticals.size(); i++){
Simplex simplex = Simplex(d,criticals[i]);
vector<Simplex> boundary;
if(simplex.first == 0){
#ifdef TTK_ENABLE_OPENMP
#pragma omp critical
{
#endif
bdmatrix->addValue(simplex_to_index[simplex],-1,getIndex(simplex));
#ifdef TTK_ENABLE_OPENMP
}
#endif
}
else{
getBoundarySimplices(simplex,boundary);
if(boundary.size() == 0){
#ifdef TTK_ENABLE_OPENMP
#pragma omp critical
{
#endif
bdmatrix->addValue(simplex_to_index[simplex],-1,getIndex(simplex));
#ifdef TTK_ENABLE_OPENMP
}
#endif
}
else{
#ifdef TTK_ENABLE_OPENMP
#pragma omp critical
{
#endif
for(auto s : boundary){
bdmatrix->addValue(simplex_to_index[simplex],simplex_to_index[s],getIndex(simplex));
}
#ifdef TTK_ENABLE_OPENMP
}
#endif
}
}
}
}
index_to_simplex = vector<Simplex>(global_index);
for(auto p : simplex_to_index)
index_to_simplex[p.second] = p.first;
bdmatrix->sort();
bdmatrix->reduce();
bdmatrix->getMatrix(matrix);
bdmatrix->getPairs(allpairs);
bdmatrix->getHomology(homology);
// the reduced matrix, pairs and homology have been copied out, so the
// temporary boundary matrix can be released
delete bdmatrix;
}
template <class dataType>
void FG_PersistentHomology::findPersistenceInterval(double& minPers, double& maxPers){
dataType minP = 0;
dataType maxP = 0;
int count_real_pairs = 0;
for(auto ppair :allpairs){
Simplex simpl1 = index_to_simplex[ppair.first];
Simplex simpl2 = index_to_simplex[ppair.second];
dataType f1 = getFiltration<dataType>(simpl1);
dataType f2 = getFiltration<dataType>(simpl2);
dataType filtr = f1-f2;
if(filtr < 0) filtr = -filtr;
if(filtr != 0)
count_real_pairs++;
if (minP > filtr)
minP = filtr;
if (maxP < filtr)
maxP = filtr;
}
minPers = double(minP);
maxPers = double(maxP);
cout << allpairs.size() << " persistence pairs in total, " << count_real_pairs << " with non-zero persistence" << endl;
cout << " Minimum Persistence: " << minPers << endl;
cout << " Maximum Persistence: " << maxPers << endl;
}
template <class dataType>
void FG_PersistentHomology::readPersistencePairs(vector<float>& points, vector<double>& fvalues, vector<char>& dims, double realMinPers, double realMaxPers){
vector<float> coords;
for(auto ppair :allpairs){
Simplex simpl1 = index_to_simplex[ppair.first];
Simplex simpl2 = index_to_simplex[ppair.second];
dataType f1 = getFiltration<dataType>(simpl1);
dataType f2 = getFiltration<dataType>(simpl2);
double filtr = double(f1-f2);
if(filtr < 0) filtr = -filtr;
if(filtr >= realMinPers && filtr <= realMaxPers){
computeBarycenter(simpl1, coords);
points.push_back(coords[0]);
points.push_back(coords[1]);
points.push_back(coords[2]);
dims.push_back(simpl1.first);
computeBarycenter(simpl2, coords);
points.push_back(coords[0]);
points.push_back(coords[1]);
points.push_back(coords[2]);
dims.push_back(simpl2.first);
fvalues.push_back(f1);
fvalues.push_back(f2);
}
}
}
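// Filtration value of a simplex: the maximum of its vertices' scalar values,
// i.e. the value at which the simplex appears in the lower-star filtration.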
template <class dataType>
dataType FG_PersistentHomology::getFiltration(Simplex& simplex){
dataType *inputData = (dataType *) inputData_;
vector<SimplexId> vertices = vector<SimplexId>();
simplexToVertices(simplex, vertices);
//from the vertices select the field value of the simplex
dataType fieldValue = inputData[vertices[0]];
for(SimplexId v=1; v<vertices.size(); v++){
fieldValue = fieldValue > inputData[vertices[v]] ? fieldValue : inputData[vertices[v]];
}
return fieldValue;
}
template <class dataType>
void FG_PersistentHomology::computerPersistenceCycles(Simplex simpl1, Simplex simpl2, map<Simplex,int>& simplexToIndex, vector<int>& generators, bool save_hole, vector<int>& holes){
int dim = simpl2.first;
int pers_val = getIndex(simpl1);
set<SimplexId> growingHole;
set<SimplexId> visited_simpl;
visited_simpl.insert(simpl1.second);
set<SimplexId> growing_generator;
queue<int> seeds;
seeds.push(simpl2.second);
while(!seeds.empty()){
SimplexId triangleIdx = seeds.front();
seeds.pop();
if(save_hole)
growingHole.insert(triangleIdx);
for(int i=0; i<dim+1; i++){
SimplexId face = extractBoundary(Simplex(dim,triangleIdx),dim-1,i);
if(growing_generator.find(face) == growing_generator.end()){
growing_generator.insert(face);
}
else{
growing_generator.erase(face);
}
}
for(int i=0; i<dim+1; i++){
SimplexId face = extractBoundary(Simplex(dim,triangleIdx),dim-1,i);
Simplex old = Simplex(dim-1,face);
if(pers_val > getIndex(old))
continue;
Simplex pair;
bool isPaired = getPair(old, pair);
if(isPaired && pair.first == dim && pair.second != triangleIdx){
if(pers_val < getIndex(pair)) {
seeds.push(pair.second);
}
}
else if(!isPaired){
if(visited_simpl.find(old.second) == visited_simpl.end() && pers_val < getIndex(old)){
int index = simplexToIndex[old];
if(allpairs.find(index) != allpairs.end()){
seeds.push(index_to_simplex[allpairs[index]].second);
}
visited_simpl.insert(old.second);
}
}
}
}
generators.clear();
generators.insert(generators.end(), growing_generator.begin(), growing_generator.end());
if(save_hole){
holes.clear();
holes.insert(holes.end(), growingHole.begin(), growingHole.end());
}
}
// template <class dataType>
// void FG_PersistentHomology::computeGeneratorForman(Simplex simpl1, Simplex simpl2, vector<int>& column, vector<int>& generators){
// for(int cSaddle : column){
// Simplex simpl = index_to_simplex[cSaddle];
// queue<int> seeds;
// seeds.push(simpl.second);
// generators.push_back(simpl.second);
// while(!seeds.empty()){
// SimplexId simplId = seeds.front();
// seeds.pop();
// for(int i=0; i<simpl.first+1; i++) {
// SimplexId face = extractBoundary(Simplex(simpl.first, simplId), simpl.first - 1, i);
// Simplex pair;
// bool isPaired = getPair(Simplex(simpl.first-1,face), pair);
// if(isPaired && pair.first == simpl.first && pair.second != simplId){
// seeds.push(pair.second);
// generators.push_back(pair.second);
// }
// }
// }
// }
// }
template <class dataType>
void FG_PersistentHomology::readCycle(vector<float>& coordinates_gen,
vector<vector<int> >& simplices_gen,
list<int>& indices_gen,
vector<int>& vertices_hole,
vector<vector<int> >& simplices_hole,
list<int>& indices_hole,
vector<double>& filtration, double minPers, double maxPers, bool formanCycles, bool compute_hole, int dim){
list<vector<int> > list_generators;
list<vector<int> > list_holes;
map<Simplex,int> simplexToIndex;
if(!formanCycles){
for(int i=0; i<index_to_simplex.size(); i++){
simplexToIndex[index_to_simplex[i]]=i;
}
}
vector<SimplexId> origin_homology = vector<SimplexId>();
for(auto ppair :allpairs) {
Simplex simpl1 = index_to_simplex[ppair.first];
if(simpl1.first != dim)
continue;
origin_homology.push_back(ppair.first);
}
int tot_simplices=0;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for shared(filtration, list_generators, tot_simplices)
#endif
for(int i=0; i<origin_homology.size(); i++){
Simplex simpl1 = index_to_simplex[origin_homology[i]];
Simplex simpl2 = index_to_simplex[allpairs[origin_homology[i]]];
if(simpl1.first != dim)
continue;
dataType f1 = getFiltration<dataType>(simpl1);
dataType f2 = getFiltration<dataType>(simpl2);
double filtr = double(f2-f1);
if(filtr < 0) filtr = -filtr;
if( filtr >= minPers && filtr <= maxPers){
vector<int> generator;
vector<int> hole;
computerPersistenceCycles<dataType>(simpl1, simpl2, simplexToIndex, generator, compute_hole, hole);
#ifdef TTK_ENABLE_OPENMP
#pragma omp critical
{
#endif
list_generators.push_back(generator);
filtration.push_back(filtr);
if(compute_hole)
list_holes.push_back(hole);
tot_simplices += generator.size();
#ifdef TTK_ENABLE_OPENMP
}
#endif
}
}
//prepare vertices from the list of generators
map<int,int> unique_vertices;
int count = 0;
vector<float> coords;
for(auto gen : list_generators){
for(auto simpl : gen){
for(int i=0; i<dim+1; i++){
SimplexId v = extractBoundary(Simplex(dim,simpl),0,i);
if(unique_vertices.find(v) == unique_vertices.end()){
unique_vertices[v] = count++;
Simplex vertex = Simplex(0,v);
computeBarycenter(vertex, coords);
for(int k=0; k<coords.size(); k++)
coordinates_gen.push_back(coords[k]);
}
}
}
}
for(auto gen : list_generators){
for(auto simpl : gen){
vector<int> vertices(dim+1);
for(int i=0; i<dim+1; i++){
vertices[i] = unique_vertices[extractBoundary(Simplex(dim,simpl),0,i)];
}
simplices_gen.push_back(vertices);
}
indices_gen.push_back(gen.size());
}
//prepare vertices from the list of holes
if(compute_hole){
unique_vertices.clear();
count = 0;
for (auto gen : list_holes) {
for (auto simpl : gen) {
for (int i = 0; i < dim + 2; i++) {
SimplexId v = extractBoundary(Simplex(dim + 1, simpl), 0, i);
if (unique_vertices.find(v) == unique_vertices.end()) {
unique_vertices[v] = count++;
vertices_hole.push_back(v);
}
}
}
}
for (auto gen : list_holes) {
for (auto simpl : gen) {
vector<int> vertices(dim + 2);
for (int i = 0; i < dim + 2; i++) {
vertices[i] = unique_vertices[extractBoundary(Simplex(dim + 1, simpl), 0, i)];
}
simplices_hole.push_back(vertices);
}
indices_hole.push_back(gen.size());
}
}
}
#endif // FG_PERSISTENTHOMOLOGY_TEMPLATE_H
|