source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if QMC_BUILD_LEVEL<5
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#include <qmc_common.h>
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
* - loops over the groups: elminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
struct J2OrbitalSoA : public WaveFunctionComponent
{
  ///alias FuncType
  using FuncType=FT;
  ///type of each component U, dU, d2U;
  using valT=typename FT::real_type;
  ///element position type
  using posT=TinyVector<valT,OHMMS_DIM>;
  ///use the same container
  using RowContainer=DistanceTableData::RowContainer;
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///task id
  int TaskID;
  ///Used to compute correction
  bool FirstTime;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  using gContainer_type=VectorSoaContainer<valT,OHMMS_DIM>;
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  ///candidate value \f$\sum_j u(r_{iat,j})\f$ kept between ratio()/ratioGrad() and acceptMove()
  valT cur_Uat;
  ///scratch per-particle u, du, d2u at the proposed position
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  ///scratch per-particle u, du, d2u at the current (pre-move) position
  aligned_vector<valT> old_u, old_du, old_d2u;
  ///compressed distance list handed to the vectorized functor kernels
  aligned_vector<valT> DistCompressed;
  ///particle indices matching DistCompressed entries
  aligned_vector<int> DistIndice;
  ///Container for \f$F[ig*NumGroups+jg]\f$
  std::vector<FT*> F;
  ///Unique J2 set for cleanup; this map owns the functors (deleted in the destructor)
  std::map<std::string,FT*> J2Unique;
  J2OrbitalSoA(ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs)=delete;
  ~J2OrbitalSoA();
  /* initialize storage */
  void init(ParticleSet& p);
  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);
  ///only the analytic-derivative helper holds particle-set state; forward the reset to it
  void resetTargetParticleSet(ParticleSet& P)
  {
    if(dPsi)
      dPsi->resetTargetParticleSet(P);
  }
  /** check in an optimizable parameter
   * @param active a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
    while(it != it_end)
    {
      // register each unique functor's parameters with both the global
      // active set and this component's local myVars
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }
  /** check out optimizable variables
   * @param active the set of variables selected for optimization
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable=myVars.is_optimizable();
    typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
    while(it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if(dPsi)
      dPsi->checkOutVariables(active);
  }
  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if(!Optimizable)
      return;
    typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
    while(it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if(dPsi)
      dPsi->resetParameters( active );
    // mirror the active values into the local copy; Index[i] < 0 marks a
    // variable that is not present in the active set
    for(int i=0; i<myVars.size(); ++i)
    {
      int ii=myVars.Index[i];
      if(ii>=0)
        myVars[i]= active[ii];
    }
  }
  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
    while(it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
    ChiesaKEcorrection();
  }
  ///kinetic-energy correction is not implemented in the SoA version; always returns zero
  RealType ChiesaKEcorrection() { return RealType();}
  /**@} */
  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
  RealType evaluateLog(ParticleSet& P,
                       ParticleSet::ParticleGradient_t& G,
                       ParticleSet::ParticleLaplacian_t& L);
  /** recompute internal data assuming distance table is fully ready */
  void recompute(ParticleSet& P);
  ValueType ratio(ParticleSet& P, int iat);
  ///ratios for every virtual position of VP relative to the reference particle
  void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for(int k=0; k<ratios.size(); ++k)
      ratios[k]=std::exp(Uat[VP.refPtcl] -
                         computeU(VP.refPS, VP.refPtcl, VP.DistTables[0]->Distances[k]));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat);
  ///nothing to undo: the cur_*/old_* scratch arrays are overwritten by the next move
  inline void restore(int iat) {}
  /** compute G and L after the sweep
   */
  void evaluateGL(ParticleSet& P,
                  ParticleSet::ParticleGradient_t& G,
                  ParticleSet::ParticleLaplacian_t& L, bool fromscratch=false);
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if ( Bytes_in_WFBuffer == 0 )
    {
      // First registration: append Uat/dUat/d2Uat to the walker buffer,
      // record how many bytes they occupy, then release the local storage.
      // The arrays are re-attached to buffer memory in copyFromBuffer(),
      // so the add() order here must match the attach order there.
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current()-Bytes_in_WFBuffer;
      // free local space
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      // already registered: just advance the cursor past this component's span
      buf.forward(Bytes_in_WFBuffer);
    }
  }
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    // attach the per-particle arrays directly to walker-buffer storage (no
    // copy); layout and order must match registerData()
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded*OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }
  RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch=false)
  {
    // NOTE(review): 'fromscratch' is accepted but evaluateGL is always called
    // with false here -- confirm whether a forced recompute was intended.
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }
  /*@{ internal compute engines*/
  ///sum \f$u(r_{iat,j})\f$ over all groups via the functors' vectorized evaluateV
  inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist)
  {
    valT curUat(0);
    const int igt=P.GroupID[iat]*NumGroups;
    for(int jg=0; jg<NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt+jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data());
    }
    return curUat;
  }
  inline void computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
                        RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle=false);
  /** compute gradient: per-dimension dot product of du with the SoA displacement rows
   */
  inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const
  {
    posT grad;
    for(int idim=0; idim<OHMMS_DIM; ++idim)
    {
      const valT* restrict dX=displ.data(idim);
      valT s=valT();
#pragma omp simd reduction(+:s) aligned(du,dX)
      for(int jat=0; jat<N; ++jat) s+=du[jat]*dX[jat];
      grad[idim]=s;
    }
    return grad;
  }
  /**@} */
};
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : TaskID(tid)
{
  // Allocate all per-particle workspaces, then set the bookkeeping flags;
  // these assignments are mutually independent.
  init(p);
  ClassName = "J2OrbitalSoA";
  KEcorr    = 0.0;
  FirstTime = true;
}
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  // J2Unique owns the distinct functor objects; release them here.
  for (auto& entry : J2Unique)
    delete entry.second;
}
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // Size every per-particle array from the target particle set; N_padded is
  // the SIMD-aligned row length used by the SoA gradient container.
  N=p.getTotalNum();
  N_padded=getAlignedSize<valT>(N);
  NumGroups=p.groups();
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  // one functor slot per (group, group) pair; filled later by addFunc()
  F.resize(NumGroups*NumGroups,nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  // Install functor j for the (ia,ib) group pair.  Ownership passes to
  // J2Unique, which the destructor cleans up.
  if(ia==ib)
  {
    if(ia==0)//first time, assign everything
    {
      // default-fill every still-empty slot so all pairs have a functor
      int ij=0;
      for(int ig=0; ig<NumGroups; ++ig)
        for(int jg=0; jg<NumGroups; ++jg, ++ij)
          if(F[ij]==nullptr) F[ij]=j;
    }
    else
      F[ia*NumGroups+ib]=j;
  }
  else
  {
    if(N==2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for(int ig=0; ig<NumGroups; ++ig)
        for(int jg=0; jg<NumGroups; ++jg)
          F[ig*NumGroups+jg]=j;
    }
    else
    {
      // generic case: the pair matrix is symmetric
      F[ia*NumGroups+ib]=j;
      F[ib*NumGroups+ia]=j;
    }
  }
  // NOTE(review): the key is the bare digit concatenation of ia and ib, so
  // group indices >= 10 could collide (e.g. (1,12) vs (11,2)) -- confirm the
  // group count stays below 10 or add a separator.
  std::stringstream aname;
  aname<<ia<<ib;
  J2Unique[aname.str()]=j;
  //ChiesaKEcorrection();
  FirstTime = false;
}
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  // Deep-copy this Jastrow for a new particle set.  fcmap guarantees that a
  // functor shared between several (ig,jg) slots is cloned exactly once, and
  // addFunc() re-establishes the shared wiring in the clone.
  J2OrbitalSoA<FT>* j2copy=new J2OrbitalSoA<FT>(tqp,-1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*,FT*> fcmap;
  for(int ig=0; ig<NumGroups; ++ig)
    for(int jg=ig; jg<NumGroups; ++jg)  // upper triangle is enough: F is symmetric
    {
      int ij=ig*NumGroups+jg;
      if(F[ij]==0)
        continue;
      typename std::map<const FT*,FT*>::iterator fit=fcmap.find(F[ij]);
      if(fit == fcmap.end())
      {
        FT* fc=new FT(*F[ij]);
        j2copy->addFunc(ig,jg,fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]]=fc;
      }
    }
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** intenal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
* @param P particleset
* @param iat particle index
* @param dist starting distance
* @param u starting value
* @param du starting first deriv
* @param d2u starting second deriv
*/
template<typename FT>
inline void
J2OrbitalSoA<FT>::computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
                            RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle)
{
  // triangle=true restricts the evaluation to j < iat (lower-triangle sweep
  // used by recompute()); otherwise all N particles are evaluated.
  const int jelmax=triangle?iat:N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax,czero);
  std::fill_n(du, jelmax,czero);
  std::fill_n(d2u,jelmax,czero);
  const int igt=P.GroupID[iat]*NumGroups;
  for(int jg=0; jg<NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt+jg]);
    int iStart = P.first(jg);
    int iEnd = std::min(jelmax,P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  // self-term reset left disabled; presumably evaluateVGL handles j==iat
  // internally via the iat argument -- confirm in FT::evaluateVGL
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode=ORB_PBYP_RATIO;
  // Temp_r holds distances from the proposed position of iat to all particles;
  // cur_Uat is cached for a possible acceptMove().
  cur_Uat=computeU(P, iat, P.DistTables[0]->Temp_r.data());
  return std::exp(Uat[iat]-cur_Uat);
}
template<typename FT>
inline void
J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  // Ratios for moving each particle to the single trial position whose
  // distances are in Temp_r.  sumU is computed once per source group.
  const DistanceTableData* d_table=P.DistTables[0];
  const auto dist=d_table->Temp_r.data();
  for(int ig=0; ig<NumGroups; ++ig)
  {
    const int igt=ig*NumGroups;
    valT sumU(0);
    for(int jg=0; jg<NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt+jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data());
    }
    for(int i=P.first(ig); i<P.last(ig); ++i)
    {
      // remove self-interaction: sumU included the i-th particle's own term
      const valT Uself = F[igt+ig]->evaluate(dist[i]);
      ratios[i]=std::exp(Uat[i]+Uself-sumU);
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType
J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  // the gradient is maintained incrementally (acceptMove/recompute); just return it
  return GradType(dUat[iat]);
}
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  // ratio plus gradient at the proposed position; cur_* arrays are kept so
  // acceptMove() can reuse them without recomputation
  UpdateMode=ORB_PBYP_PARTIAL;
  computeU3(P,iat,P.DistTables[0]->Temp_r.data(), cur_u.data(),cur_du.data(),cur_d2u.data());
  cur_Uat=simd::accumulate_n(cur_u.data(),N,valT());
  DiffVal=Uat[iat]-cur_Uat;
  grad_iat+=accumulateG(cur_du.data(),P.DistTables[0]->Temp_dr);
  return std::exp(DiffVal);
}
template<typename FT>
void
J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat)
{
  // Incrementally update Uat/dUat/d2Uat for all particles after iat's move.
  // get the old u, du, d2u
  const DistanceTableData* d_table=P.DistTables[0];
  computeU3(P,iat,d_table->Distances[iat],old_u.data(),old_du.data(),old_d2u.data());
  if(UpdateMode == ORB_PBYP_RATIO)
  {//ratio-only during the move; need to compute derivatives
    const auto dist=d_table->Temp_r.data();
    computeU3(P,iat,dist,cur_u.data(),cur_du.data(),cur_d2u.data());
  }
  valT cur_d2Uat(0);
  const auto& new_dr=d_table->Temp_dr;
  const auto& old_dr=d_table->Displacements[iat];
  // lapfac = OHMMS_DIM-1 combines d2u with du in the Laplacian accumulation;
  // assumes du stores the radial derivative already divided by r -- consistent
  // with the identical factor in recompute()
  constexpr valT lapfac=OHMMS_DIM-RealType(1);
#pragma omp simd reduction(+:cur_d2Uat)
  for(int jat=0; jat<N; jat++)
  {
    const valT du = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac*cur_du[jat];
    const valT dl = old_d2u[jat] + lapfac*old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }
  // gradients: subtract the old pair contribution and add the new one,
  // dimension by dimension over the SoA rows
  posT cur_dUat;
  for(int idim=0; idim<OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX=new_dr.data(idim);
    const valT* restrict old_dX=old_dr.data(idim);
    const valT* restrict cur_du_pt=cur_du.data();
    const valT* restrict old_du_pt=old_du.data();
    valT* restrict save_g=dUat.data(idim);
    valT cur_g=cur_dUat[idim];
#pragma omp simd reduction(+:cur_g) aligned(old_dX,new_dX,save_g,cur_du_pt,old_du_pt)
    for(int jat=0; jat<N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg = newg - old_du_pt[jat]*old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }
  // finally install the moved particle's own totals
  LogValue += Uat[iat]-cur_Uat;
  Uat[iat] = cur_Uat;
  dUat(iat) = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
template<typename FT>
void
J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  // Rebuild Uat/dUat/d2Uat from scratch with a lower-triangle sweep: for each
  // iat only pairs j < iat are evaluated (computeU3 triangle mode), and the
  // symmetric contribution is scattered back to the j side afterwards.
  const DistanceTableData* d_table=P.DistTables[0];
  for(int ig=0; ig<NumGroups; ++ig)
  {
    const int igt=ig*NumGroups;
    for(int iat=P.first(ig),last=P.last(ig); iat<last; ++iat)
    {
      computeU3(P,iat,d_table->Distances[iat],cur_u.data(),cur_du.data(),cur_d2u.data(),true);
      Uat[iat]=simd::accumulate_n(cur_u.data(),iat,valT());
      posT grad;
      valT lap(0);
      const valT* restrict u = cur_u.data();
      const valT* restrict du = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const RowContainer& displ = d_table->Displacements[iat];
      // same lapfac convention as acceptMove(): d2u + (DIM-1)*du per pair
      constexpr valT lapfac=OHMMS_DIM-RealType(1);
#pragma omp simd reduction(+:lap) aligned(du,d2u)
      for(int jat=0; jat<iat; ++jat)
        lap+=d2u[jat]+lapfac*du[jat];
      for(int idim=0; idim<OHMMS_DIM; ++idim)
      {
        const valT* restrict dX=displ.data(idim);
        valT s=valT();
#pragma omp simd reduction(+:s) aligned(du,dX)
        for(int jat=0; jat<iat; ++jat) s+=du[jat]*dX[jat];
        grad[idim]=s;
      }
      dUat(iat)=grad;
      d2Uat[iat]=-lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u,du,d2u)
      for(int jat=0; jat<iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat]+lapfac*du[jat];
      }
      // gradients on the j side get the opposite sign of the displacement
      for(int idim=0; idim<OHMMS_DIM; ++idim)
      {
        valT* restrict save_g=dUat.data(idim);
        const valT* restrict dX=displ.data(idim);
#pragma omp simd aligned(save_g,du,dX)
        for(int jat=0; jat<iat; jat++)
          save_g[jat]-=du[jat]*dX[jat];
      }
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::RealType
J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                              ParticleSet::ParticleGradient_t& G,
                              ParticleSet::ParticleLaplacian_t& L)
{
  // Full evaluation: force a from-scratch recompute, then accumulate G and L.
  const bool from_scratch = true;
  evaluateGL(P, G, L, from_scratch);
  return LogValue;
}
template<typename FT>
void
J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                             ParticleSet::ParticleGradient_t& G,
                             ParticleSet::ParticleLaplacian_t& L, bool fromscratch)
{
  // Accumulate the cached per-particle values into G and L and total the log.
  if(fromscratch) recompute(P);
  LogValue=valT(0);
  for(int iat=0; iat<N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  // Uat[i] sums over all j, so each pair is counted twice; the -0.5 corrects
  // the double counting and sets the sign of the log value
  constexpr valT mhalf(-0.5);
  LogValue=mhalf*LogValue;
}
}
#endif
|
for-task-for-task.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#define NUM_OUTER_THREADS 16
#define NUM_INNER_THREADS 16
#define SMALL_LOOPCOUNT 64
/*! Utility function to spend some time in a loop */
static void do_some_work (void) {
int i;
double sum = 0;
for(i = 0; i < 1000; i++) {
sum += sqrt(i);
}
}
/* Each outer task i spawns SMALL_LOOPCOUNT inner tasks that atomically add
 * j = 1..SMALL_LOOPCOUNT into vals[i]; every slot must end up equal to
 * known_sum = SMALL_LOOPCOUNT*(SMALL_LOOPCOUNT+1)/2.
 * Returns 0 on success, 1 if any slot mismatches. */
int test_omp_parallel_for_task_for_task() {
  int vals[SMALL_LOOPCOUNT];
  int i;
  for (i = 0; i < SMALL_LOOPCOUNT; i++) {
    vals[i] = 0;
  }
  /* vals must be SHARED so the atomic updates land in the array checked at
   * the end; the previous firstprivate(vals) gave every task a private copy
   * whose updates were discarded.  The index also now runs
   * 0..SMALL_LOOPCOUNT-1: the previous 1..SMALL_LOOPCOUNT wrote one element
   * past the end of vals and never touched vals[0]. */
#pragma omp parallel shared(vals) num_threads(NUM_OUTER_THREADS)
#pragma omp master
  {
    for (i = 0; i < SMALL_LOOPCOUNT; i++) {
#pragma omp task firstprivate(i)
      {
#pragma omp parallel num_threads(NUM_INNER_THREADS) firstprivate(i)
#pragma omp master
        {
          int j;
          for (j = 1; j <= SMALL_LOOPCOUNT; j++) {
#pragma omp task firstprivate(i)
            {
              int k;
              do_some_work();
              for (k = 0; k < j % 4; k++) {
#pragma omp taskyield
              }
#pragma omp atomic
              vals[i] += j;
            }
          }
        }
        {
          int j;
          for (j = 0; j < i % 5; j++) {
#pragma omp taskyield
          }
        }
      }
    }
  }
  int num_failed = 0;
  int known_sum = SMALL_LOOPCOUNT * (SMALL_LOOPCOUNT + 1) / 2;
  for (i = 0; i < SMALL_LOOPCOUNT; i++) {
    if (vals[i] != known_sum)
      num_failed++;
  }
  return num_failed ? 1 : 0;
}
int main() {
  int i;
  int num_failed = 0;
  /* test_omp_parallel_for_task_for_task() returns 1 on failure and 0 on
   * success (see its final "return num_failed ? 1 : 0;"), so a failed
   * repetition is a NON-ZERO return.  The previous "if (!test...)" counted
   * successful repetitions as failures. */
  for (i = 0; i < REPETITIONS; i++) {
    if (test_omp_parallel_for_task_for_task()) {
      num_failed++;
    }
  }
  return num_failed;
}
|
mkldnn_quantize_v2-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_quantize_v2-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantize_v2-inl.h"
namespace mxnet {
namespace op {
// Stateful FP32 -> int8/uint8 quantization operator that caches the MKLDNN
// reorder primitive across invocations for a stable data range.
class SgMKLDNNQuantizeOperator {
 public:
  explicit SgMKLDNNQuantizeOperator(const nnvm::NodeAttrs &attrs)
      : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}
  // Quantizes inputs[0] into outputs[0] and writes min/max into outputs[1..2].
  void Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
               const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs);
 private:
  bool initalized_{false};  // (sic) guards the cached reorder primitive below
  QuantizeV2Param param_;
  float cached_data_min_{0.f};  // data range fwd_pd_ was built for
  float cached_data_max_{0.f};
  mkldnn::memory::desc o_desc_;              // cached output memory descriptor
  mkldnn_args_map_t args_;                   // reused argument map for the reorder
  std::shared_ptr<mkldnn::reorder> fwd_pd_;  // cached reorder primitive
};
void SgMKLDNNQuantizeOperator::Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
  float quantized_range = 0.0;
  NDArray in_buffer = inputs[0];
  // seed min/max at the opposite extremes so any data point updates them
  float data_min = mshadow::red::limits::MaxValue<float>();
  float data_max = mshadow::red::limits::MinValue<float>();
  // Pass through quantized data
  if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
    // input already quantized: only min/max outputs need to be produced
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      *outputs[1].data().dptr<float>() = param_.min_calib_range.value();
      *outputs[2].data().dptr<float>() = param_.max_calib_range.value();
    } else {
      if (inputs[0].dtype() == mshadow::kUint8) {
        *outputs[1].data().dptr<float>() = 0;
        *outputs[2].data().dptr<float>() = kUint8Range;
      } else {
        *outputs[1].data().dptr<float>() = -kInt8Range;
        *outputs[2].data().dptr<float>() = kInt8Range;
      }
    }
    if (req[0] != kWriteInplace) {
      const_cast<NDArray &>(outputs[0]).CopyFrom(*inputs[0].GetMKLDNNData());
      MKLDNNStream::Get()->Submit();
    }
  } else {
    if (in_buffer.IsView() && in_buffer.IsMKLDNNData()) in_buffer = inputs[0].Reorder2Default();
    auto i_mem = in_buffer.GetMKLDNNData();
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      data_min = param_.min_calib_range.value();
      data_max = param_.max_calib_range.value();
    } else {
      // no calib info
      // scan the tensor for min/max with per-thread partials merged below
      // NOTE(review): i_mem was taken from in_buffer BEFORE this reassignment;
      // the reorder source below still refers to the earlier memory -- confirm
      // this is the intended behavior.
      in_buffer = inputs[0].Reorder2Default();
      auto in_ptr = in_buffer.data().dptr<float>();
      auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      std::vector<float> data_maxs(nthreads, data_max);
      std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
      for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
        int tid = omp_get_thread_num();
        if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i];
        if (in_ptr[i] < data_mins[tid]) data_mins[tid] = in_ptr[i];
      }
      for (index_t i = 0; i < nthreads; i++) {
        if (data_maxs[i] > data_max) data_max = data_maxs[i];
        if (data_mins[i] < data_min) data_min = data_mins[i];
      }
      // a changed data range invalidates the cached reorder primitive
      if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
        initalized_ = false;
    }
    // Write output min/max
    auto out_type = GetQuantizeOutputType(param_);
    if (out_type == mshadow::kUint8) {
      quantized_range = kUint8Range;
      *outputs[1].data().dptr<float>() = data_min;
      *outputs[2].data().dptr<float>() = data_max;
    } else if (out_type == mshadow::kInt8) {
      // int8 output uses a symmetric range [-real_range, real_range]
      float real_range = MaxAbs(data_min, data_max);
      quantized_range = kInt8Range;
      *outputs[1].data().dptr<float>() = -real_range;
      *outputs[2].data().dptr<float>() = real_range;
    } else {
      LOG(FATAL) << "mkldnn quantize op only supports int8 and uint8 as output type";
    }
    if (!initalized_) {
      // build (and cache) the reorder primitive that performs the actual
      // float -> int8/uint8 conversion with the computed output scale
      cached_data_min_ = data_min;
      cached_data_max_ = data_max;
      float real_range = MaxAbs(data_min, data_max);
      float scale = quantized_range / real_range;
      mkldnn::primitive_attr attr;
      const int mask = 0;  // 0 = a single common scale for the whole tensor
      std::vector<float> scales = {scale};
      attr.set_output_scales(mask, scales);
      mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
      auto i_desc = i_mem->get_desc();
      size_t i_ndim = in_buffer.shape().ndim();
      if (i_ndim == 4) {
        // 4-D activations are emitted in NHWC layout
        mkldnn::memory::format_tag o_fmt = mkldnn::memory::format_tag::nhwc;
        mkldnn::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims);
        o_desc_ = mkldnn::memory::desc(o_dims, get_mkldnn_type(out_type), o_fmt);
      } else {
        // otherwise keep the input layout and only swap the data type
        o_desc_ = i_desc;
        o_desc_.data.data_type = get_mkldnn_type_t(out_type);
      }
      auto reorder_pd =
          mkldnn::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr);
      fwd_pd_ = std::make_shared<mkldnn::reorder>(reorder_pd);
      initalized_ = true;
    }
    auto o_mem = CreateMKLDNNMem(outputs[0], o_desc_, req[0]);
    args_[MKLDNN_ARG_FROM] = *i_mem;
    args_[MKLDNN_ARG_TO] = *o_mem.second;
    MKLDNNStream::Get()->RegisterPrimArgs(*fwd_pd_, args_);
    CommitOutput(outputs[0], o_mem);
    MKLDNNStream::Get()->Submit();
  }
}
static void SgMKLDNNQuantizeForward(const OpStatePtr &state_ptr, const OpContext &ctx,
                                    const std::vector<NDArray> &inputs,
                                    const std::vector<OpReqType> &req,
                                    const std::vector<NDArray> &outputs) {
  // Dispatch directly to the stateful operator held by state_ptr.
  state_ptr.get_state<SgMKLDNNQuantizeOperator>().Forward(ctx, inputs, req, outputs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
|
dependences_mutexinoutset.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// GCC 9 introduced codegen for mutexinoutset
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// icc does not yet support mutexinoutset
// XFAIL: icc
// clang 9 introduced codegen for mutexinoutset
// UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
int main() {
  int x = 0;
  /* Two threads; the master serially creates three tasks with different
     dependence kinds on x so the OMPT dependences callback can be checked
     against the FileCheck expectations below. */
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      print_ids(0);
      printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
             &x);
      /* first task: plain 'out' dependence on x */
#pragma omp task depend(out : x)
      {
        x++;
        delay(100);
      }
      print_fuzzy_address(1);
      print_ids(0);
      /* second task: mutexinoutset dependence -- the construct under test */
#pragma omp task depend(mutexinoutset : x)
      {
        x++;
        delay(100);
      }
      print_fuzzy_address(2);
      print_ids(0);
      /* third task: 'in' dependence, ordered after the writers above */
#pragma omp task depend(in : x)
      { x = -1; }
      print_ids(0);
    }
  }
  x++;
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
|
callback_openmp.c | // RUN: %clang_cc1 -triple i386-unknown-unknown -fopenmp -O1 %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple i386-unknown-unknown -fopenmp -O1 %s -emit-llvm -o - | opt -ipconstprop -S | FileCheck --check-prefix=IPCP %s
// CHECK: declare !callback ![[cid:[0-9]+]] void @__kmpc_fork_call
// CHECK: declare !callback ![[cid]] void @__kmpc_fork_teams
// CHECK: ![[cid]] = !{![[cidb:[0-9]+]]}
// CHECK: ![[cidb]] = !{i64 2, i64 -1, i64 -1, i1 true}
void work1(int, int);
void work2(int, int);
void work12(int, int);
/* Exercises !callback metadata on __kmpc_fork_call/__kmpc_fork_teams: with
   firstprivate(p) the constant p == 2 should propagate through the outlined
   functions into the work* callees (checked by the IPCP lines). */
void foo(int q) {
  int p = 2;
#pragma omp parallel firstprivate(q, p)
  work1(p, q);
// IPCP: call void @work1(i32 2, i32 %{{[._a-zA-Z0-9]*}})
#pragma omp parallel for firstprivate(p, q)
  for (int i = 0; i < q; i++)
    work2(i, p);
// IPCP: call void @work2(i32 %{{[._a-zA-Z0-9]*}}, i32 2)
#pragma omp target teams firstprivate(p)
  work12(p, p);
// IPCP: call void @work12(i32 2, i32 2)
}
|
GB_unaryop__lnot_int16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_int16
// op(A') function: GB_tran__lnot_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int16_int16
(
    int16_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // apply cij = !(aij != 0) entrywise; iterations are independent, so a
    // static schedule over anz entries is sufficient
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // transpose + typecast + apply; the shared template expands using the
    // GB_* macros defined above in this generated file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
declare_variant_messages.c | // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// This file exercises the parser/sema diagnostics for the OpenMP
// '#pragma omp declare variant' directive: malformed directive syntax,
// invalid context sets/selectors/properties, score expressions, and
// restrictions on which declarations a variant may be attached to.
// NOTE: the trailing comments on each line are clang -verify checks and
// are part of the test itself; do not edit them.
#pragma omp declare // expected-error {{expected an OpenMP directive}}
int foo(void);
#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} expected-error {{expected 'match' clause on}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device = {kind(score(ibm) }) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<recovery-expr>()'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
int var;
#pragma omp threadprivate(var)
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();
int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);
int after_use_variant(void);
int after_use();
// 'bar' calls 'after_use' before any variant is attached to it; the
// directive below this definition therefore warns about use-before-variant.
int bar() {
  return after_use();
}
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int defined1(void) { return 0; }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined1(void);
int diff_cc_variant(void);
#pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
#pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void diff_ret(void);
void marked(void);
void not_marked(void);
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}}
void marked_variant(void);
#pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void marked(void);
// isa traits are not diagnosed on the declarations themselves; the warnings
// are emitted at the call sites inside 'caller' below.
#pragma omp declare variant(foo) match(device = {isa("foo")})
int unknown_isa_trait(void);
#pragma omp declare variant(foo) match(device = {isa(foo)})
int unknown_isa_trait2(void);
#pragma omp declare variant(foo) match(device = {kind(fpga), isa(bar)})
int ignored_isa_trait(void);
void caller() {
  unknown_isa_trait();  // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
  unknown_isa_trait2(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
  ignored_isa_trait();
}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
// FIXME: If the scores are equivalent we should detect that and allow it.
#pragma omp begin declare variant match(implementation = {vendor(score(2) \
                                                                 : llvm)})
#pragma omp declare variant(foo) match(implementation = {vendor(score(2) \
                                                                : llvm)}) // expected-error@-1 {{nested OpenMP context selector contains duplicated trait 'llvm' in selector 'vendor' and set 'implementation' with different score}}
int conflicting_nested_score(void);
#pragma omp end declare variant
// FIXME: We should build the conjuction of different conditions, see also the score fixme above.
#pragma omp begin declare variant match(user = {condition(1)})
#pragma omp declare variant(foo) match(user = {condition(1)}) // expected-error {{nested user conditions in OpenMP context selector not supported (yet)}}
int conflicting_nested_condition(void);
#pragma omp end declare variant
|
quicksort.h | // -*- C++ -*-
// Copyright (C) 2007-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/quicksort.h
* @brief Implementation of a unbalanced parallel quicksort (in-place).
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1
#include <parallel/parallel.h>
#include <parallel/partition.h>
namespace __gnu_parallel
{
/** @brief Unbalanced quicksort divide step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __pivot_rank Desired __rank of the pivot.
* @param __num_samples Choose pivot from that many samples.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
  template<typename _RAIter, typename _Compare>
    typename std::iterator_traits<_RAIter>::difference_type
    __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
                              _Compare __comp, typename std::iterator_traits
                              <_RAIter>::difference_type __pivot_rank,
                              typename std::iterator_traits
                              <_RAIter>::difference_type
                              __num_samples, _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;
      // Cannot draw more samples than there are elements.
      __num_samples = std::min(__num_samples, __n);

      // Allocate uninitialized, to avoid default constructor.
      _ValueType* __samples = static_cast<_ValueType*>
        (::operator new(__num_samples * sizeof(_ValueType)));

      // Copy-construct evenly spaced samples into the raw buffer via
      // placement new; matched by the explicit destructor calls below.
      // (unsigned long long intermediate avoids overflow in __s * __n.)
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        {
          const unsigned long long __index = static_cast<unsigned long long>
            (__s) * __n / __num_samples;
          ::new(&(__samples[__s])) _ValueType(__begin[__index]);
        }

      // Sort the samples and select the one whose position corresponds
      // to the desired pivot rank in the whole sequence.
      __gnu_sequential::sort(__samples, __samples + __num_samples, __comp);
      _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];

      // Partition [__begin, __end) in parallel around __pivot, with
      // __pred(__x) == __comp(__x, __pivot).  __pivot stays alive in the
      // sample buffer until the partition is done.
      __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
        __pred(__comp, __pivot);
      _DifferenceType __split = __parallel_partition(__begin, __end,
                                                     __pred, __num_threads);

      // Destroy the samples and release the raw buffer.
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        __samples[__s].~_ValueType();
      ::operator delete(__samples);

      return __split;
    }
/** @brief Unbalanced quicksort conquer step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
if (__num_threads <= 1)
{
__gnu_sequential::sort(__begin, __end, __comp);
return;
}
_DifferenceType __n = __end - __begin, __pivot_rank;
if (__n <= 1)
return;
_ThreadIndex __num_threads_left;
if ((__num_threads % 2) == 1)
__num_threads_left = __num_threads / 2 + 1;
else
__num_threads_left = __num_threads / 2;
__pivot_rank = __n * __num_threads_left / __num_threads;
_DifferenceType __split = __parallel_sort_qs_divide
(__begin, __end, __comp, __pivot_rank,
_Settings::get().sort_qs_num_samples_preset, __num_threads);
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
__parallel_sort_qs_conquer(__begin, __begin + __split,
__comp, __num_threads_left);
#pragma omp section
__parallel_sort_qs_conquer(__begin + __split, __end,
__comp, __num_threads - __num_threads_left);
}
}
/** @brief Unbalanced quicksort main call.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator input sequence, ignored.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
_GLIBCXX_CALL(__n)
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
// At least one element per processor.
if (__num_threads > __n)
__num_threads = static_cast<_ThreadIndex>(__n);
__parallel_sort_qs_conquer(
__begin, __begin + __n, __comp, __num_threads);
}
} //namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
|
GB_AxB_dot3_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot3_template: C<M>=A'*B via dot products, where C is sparse/hyper
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C and M are both sparse or hyper, and C->h is a copy of M->h.
// M is present, and not complemented. It may be valued or structural.
{

    // Each task owns a contiguous range of C's entries (a range of vectors
    // k, possibly sharing its first/last vector with neighboring tasks).
    // Entries whose dot product turns out to be empty are flagged as
    // zombies; per-task counts are folded into nzombies by the reduction.
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [tid].kfirst ;
        int64_t klast  = TaskList [tid].klast ;
        int64_t pC_first = TaskList [tid].pC ;
        int64_t pC_last  = TaskList [tid].pC_end ;
        int64_t bpleft = 0 ;            // Ch is not jumbled
        int64_t task_nzombies = 0 ;     // # of zombies found by this task

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get C(:,k) and M(:k)
            //------------------------------------------------------------------

            #if defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
            // M and C are sparse
            const int64_t j = k ;
            #else
            // M and C are either both sparse or both hypersparse
            const int64_t j = GBH (Ch, k) ;
            #endif

            int64_t pC_start = Cp [k] ;
            int64_t pC_end   = Cp [k+1] ;
            if (k == kfirst)
            {
                // First vector for task; may only be partially owned.
                pC_start = pC_first ;
                pC_end   = GB_IMIN (pC_end, pC_last) ;
            }
            else if (k == klast)
            {
                // Last vector for task; may only be partially owned.
                pC_end = pC_last ;
            }
            else
            {
                // task completely owns this vector C(:,k).
            }

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            #if GB_B_IS_HYPER
            // B is hyper
            int64_t pB_start, pB_end ;
            GB_lookup (true, Bh, Bp, vlen, &bpleft, bnvec-1, j,
                &pB_start, &pB_end) ;
            #elif GB_B_IS_SPARSE
            // B is sparse
            const int64_t pB_start = Bp [j] ;
            const int64_t pB_end   = Bp [j+1] ;
            #else
            // B is bitmap or full
            const int64_t pB_start = j * vlen ;
            #endif

            #if (GB_B_IS_SPARSE || GB_B_IS_HYPER)
            const int64_t bjnz = pB_end - pB_start ;
            if (bjnz == 0)
            {
                // no work to do if B(:,j) is empty, except for zombies
                task_nzombies += (pC_end - pC_start) ;
                for (int64_t pC = pC_start ; pC < pC_end ; pC++)
                {
                    // C(i,j) is a zombie
                    int64_t i = Mi [pC] ;
                    Ci [pC] = GB_FLIP (i) ;
                }
                continue ;
            }
            #if (GB_A_IS_SPARSE || GB_A_IS_HYPER)
            // Both A and B are sparse; get first and last in B(:,j)
            const int64_t ib_first = Bi [pB_start] ;
            const int64_t ib_last  = Bi [pB_end-1] ;
            #endif
            #endif

            //------------------------------------------------------------------
            // C(:,j)<M(:,j)> = A(:,i)'*B(:,j)
            //------------------------------------------------------------------

            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {

                //--------------------------------------------------------------
                // get C(i,j) and M(i,j)
                //--------------------------------------------------------------

                bool cij_exists = false ;
                GB_CIJ_DECLARE (cij) ;

                // get the value of M(i,j)
                int64_t i = Mi [pC] ;
                #if !defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
                // if M is structural, no need to check its values
                if (GB_mcast (Mx, pC, msize))
                #endif
                {

                    //----------------------------------------------------------
                    // the mask allows C(i,j) to be computed
                    //----------------------------------------------------------

                    #if GB_A_IS_HYPER
                    // A is hyper
                    int64_t pA, pA_end ;
                    int64_t apleft = 0 ;    // M might be jumbled
                    GB_lookup (true, Ah, Ap, vlen, &apleft, anvec-1, i,
                        &pA, &pA_end) ;
                    const int64_t ainz = pA_end - pA ;
                    if (ainz > 0)
                    #elif GB_A_IS_SPARSE
                    // A is sparse
                    int64_t pA = Ap [i] ;
                    const int64_t pA_end = Ap [i+1] ;
                    const int64_t ainz = pA_end - pA ;
                    if (ainz > 0)
                    #else
                    // A is bitmap or full
                    const int64_t pA = i * vlen ;
                    #endif
                    {
                        // C(i,j) = A(:,i)'*B(:,j)
                        #include "GB_AxB_dot_cij.c"
                    }
                }

                if (!GB_CIJ_EXISTS)
                {
                    // C(i,j) is a zombie
                    task_nzombies++ ;
                    Ci [pC] = GB_FLIP (i) ;
                }
            }
        }
        // fold this task's zombie count into the global count (reduction)
        nzombies += task_nzombies ;
    }
}

#undef GB_A_IS_SPARSE
#undef GB_A_IS_HYPER
#undef GB_A_IS_BITMAP
#undef GB_A_IS_FULL
#undef GB_B_IS_SPARSE
#undef GB_B_IS_HYPER
#undef GB_B_IS_BITMAP
#undef GB_B_IS_FULL
|
IJVector_parcsr.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJVector_Par interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJVectorCreatePar
*
* creates ParVector if necessary, and leaves a pointer to it as the
* hypre_IJVector object
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorCreatePar(hypre_IJVector *vector,
                        HYPRE_BigInt   *IJpartitioning)
{
   MPI_Comm      comm = hypre_IJVectorComm(vector);

   HYPRE_Int     num_procs, j;
   HYPRE_BigInt  global_n, *partitioning, jmin;
   hypre_MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: only this rank's two endpoints are stored,
      so global size and first row come from the IJ vector itself. */
   jmin = hypre_IJVectorGlobalFirstRow(vector);
   global_n = hypre_IJVectorGlobalNumRows(vector);
   partitioning = hypre_CTAlloc(HYPRE_BigInt,  2, HYPRE_MEMORY_HOST);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < 2; j++)
      partitioning[j] = IJpartitioning[j] - jmin;
#else
   /* Global-partition mode: all num_procs+1 partition boundaries are
      stored on every rank. */
   jmin = IJpartitioning[0];
   global_n = IJpartitioning[num_procs] - jmin;

   partitioning = hypre_CTAlloc(HYPRE_BigInt,  num_procs+1, HYPRE_MEMORY_HOST);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < num_procs+1; j++)
      partitioning[j] = IJpartitioning[j] - jmin;
#endif

   /* The ParVector takes ownership of the freshly allocated partitioning. */
   hypre_IJVectorObject(vector) =
      hypre_ParVectorCreate(comm, global_n, (HYPRE_BigInt *) partitioning);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDestroyPar
*
* frees ParVector local storage of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
   /* Release the ParVector held as the IJVector's underlying object. */
   hypre_ParVector *par_vector = (hypre_ParVector *) hypre_IJVectorObject(vector);

   return hypre_ParVectorDestroy(par_vector);
}
/******************************************************************************
*
* hypre_IJVectorInitializePar
*
* initializes ParVector of IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
/* Underlying ParVector object and the auxiliary off-processor stash */
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
HYPRE_BigInt *partitioning = hypre_ParVectorPartitioning(par_vector);
hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
HYPRE_Int my_id;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_MPI_Comm_rank(comm,&my_id);
/* Without a partitioning the local vector cannot be sized -- report and bail */
if (!partitioning)
{
if (print_level)
{
hypre_printf("No ParVector partitioning for initialization -- ");
hypre_printf("hypre_IJVectorInitializePar\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Local size is the extent of this process's slice of the partitioning */
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_VectorSize(local_vector) = (HYPRE_Int)(partitioning[1] - partitioning[0]);
#else
hypre_VectorSize(local_vector) = (HYPRE_Int)(partitioning[my_id+1] - partitioning[my_id]);
#endif
hypre_ParVectorInitialize(par_vector);
/* Create the auxiliary vector (off-processor element stash) on first use */
if (!aux_vector)
{
hypre_AuxParVectorCreate(&aux_vector);
hypre_IJVectorTranslator(vector) = aux_vector;
}
hypre_AuxParVectorInitialize(aux_vector);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetMaxOffProcElmtsPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
                                    HYPRE_Int max_off_proc_elmts)
{
   /* Lazily create the auxiliary vector that stashes off-processor
      contributions, then record the requested capacity hint. */
   hypre_AuxParVector *aux_vector =
      (hypre_AuxParVector *) hypre_IJVectorTranslator(vector);

   if (aux_vector == NULL)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }

   hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDistributePar
*
* takes an IJVector generated for one processor and distributes it
* across many processors according to vec_starts,
* if vec_starts is NULL, it distributes them evenly?
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
                            const HYPRE_Int *vec_starts)
{
   /* Redistribute a single-process vector across the communicator
      according to vec_starts (NULL -> even distribution). */
   hypre_ParVector *old_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_ParVector *par_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   if (!old_vector)
   {
      if (print_level)
      {
         hypre_printf("old_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
                                        hypre_ParVectorLocalVector(old_vector),
                                        (HYPRE_BigInt *)vec_starts);
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is unallocated ****\n");
      }
      hypre_error_in_arg(1);
      /* Redistribution failed: keep the old vector intact instead of
         destroying it and installing a NULL object (which would lose
         the caller's data and leave the IJVector unusable). */
      return hypre_error_flag;
   }

   /* Success: the distributed vector replaces the old one. */
   hypre_ParVectorDestroy(old_vector);
   hypre_IJVectorObject(vector) = par_vector;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorZeroValuesPar
*
* zeroes all local components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
HYPRE_Int my_id;
HYPRE_Int i;
HYPRE_BigInt vec_start, vec_stop;
HYPRE_Complex *data;
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_BigInt *partitioning;
hypre_Vector *local_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
partitioning = hypre_ParVectorPartitioning(par_vector);
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!partitioning)
{
if (print_level)
{
hypre_printf("partitioning == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Local range owned by this process; note this is the ParVector's
   (zero-based) partitioning, not the IJ partitioning */
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = partitioning[0];
vec_stop = partitioning[1];
#else
vec_start = partitioning[my_id];
vec_stop = partitioning[my_id+1];
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Zero every locally owned component (vec_stop is exclusive here) */
data = hypre_VectorData( local_vector );
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < (HYPRE_Int)(vec_stop - vec_start); i++)
data[i] = 0.;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetValuesPar
*
* sets a potentially noncontiguous set of components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetValuesPar(hypre_IJVector *vector,
HYPRE_Int num_values,
const HYPRE_BigInt *indices,
const HYPRE_Complex *values)
{
HYPRE_Int my_id;
HYPRE_Int j, k;
HYPRE_BigInt i, vec_start, vec_stop;
HYPRE_Complex *data;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_Vector *local_vector;
/* If no components are to be set, perform no checking and return */
if (num_values < 1) return 0;
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Inclusive global index range owned by this process (note the -1:
   vec_stop here is the LAST owned index, unlike GetValuesPar) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = IJpartitioning[0];
vec_stop = IJpartitioning[1]-1;
#else
vec_start = IJpartitioning[my_id];
vec_stop = IJpartitioning[my_id+1]-1;
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Determine whether indices points to local indices only, and if not, store
indices and values in auxiliary vector structure. If indices == NULL,
assume that num_values components are to be set in a block starting at
vec_start. NOTE: If indices == NULL off proc values are ignored!!! */
data = hypre_VectorData(local_vector);
if (indices)
{
/* Indexed set: only locally owned entries are written; off-processor
   indices are silently skipped (unlike AddToValuesPar, no stash) */
for (j = 0; j < num_values; j++)
{
i = indices[j];
if (i >= vec_start && i <= vec_stop)
{
k = (HYPRE_Int)( i- vec_start);
data[k] = values[j];
}
}
}
else
{
/* Blocked set: values fill the local range from its start; clamp
   num_values to the local size and warn */
if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
{
if (print_level)
{
hypre_printf("Warning! Indices beyond local range not identified!\n ");
hypre_printf("Off processor values have been ignored!\n");
}
num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
data[j] = values[j];
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAddToValuesPar
*
* adds to a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAddToValuesPar(hypre_IJVector *vector,
                             HYPRE_Int num_values,
                             const HYPRE_BigInt *indices,
                             const HYPRE_Complex *values)
{
   HYPRE_Int my_id;
   HYPRE_Int j, k;
   /* i, vec_start, vec_stop are global indices and must be HYPRE_BigInt
      (as in hypre_IJVectorSetValuesPar); declaring them HYPRE_Int, as
      before, truncates indices in 64-bit (mixed-int) builds. */
   HYPRE_BigInt i, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Inclusive global index range owned by this process */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[my_id];
   vec_stop = IJpartitioning[my_id+1]-1;
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      /* Indexed add: locally owned entries are accumulated in place;
         off-processor entries are saved in the auxiliary stash and
         communicated later by hypre_IJVectorAssemblePar. */
      HYPRE_Int current_num_elmts
         = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      HYPRE_Int max_off_proc_elmts
         = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_BigInt *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off processor
               stash */
            if (!max_off_proc_elmts)
            {
               /* first off-processor element: allocate the stash */
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
                  max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               /* stash full: grow it and re-register the new buffers */
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector)
                  = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            off_proc_i[current_num_elmts] = i;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentNumElmts(aux_vector)=current_num_elmts;
         }
         else /* local values are added to the vector */
         {
            k = (HYPRE_Int)(i - vec_start);
            data[k] += values[j];
         }
      }
   }
   else
   {
      /* Blocked add: values accumulate onto the local range from its
         start; clamp num_values to the local size and warn */
      if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         data[j] += values[j];
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAssemblePar
*
* currently tests existence of of ParVector object and its partitioning
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_BigInt *partitioning;
   MPI_Comm comm = hypre_IJVectorComm(vector);
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      /* must not dereference a NULL par_vector below */
      return hypre_error_flag;
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (aux_vector)
   {
      /* Flush the off-processor contributions collected by AddToValues.
         The Allreduce is collective: every rank must decide identically
         whether any rank holds off-processor data. */
      HYPRE_Int off_proc_elmts, current_num_elmts;
      HYPRE_Int max_off_proc_elmts;
      HYPRE_BigInt *off_proc_i;
      HYPRE_Complex *off_proc_data;
      current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      /* fixed: '&current_num_elmts' had been mangled to '¤t_num_elmts' */
      hypre_MPI_Allreduce(&current_num_elmts, &off_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (off_proc_elmts)
      {
         max_off_proc_elmts=hypre_AuxParVectorMaxOffProcElmts(aux_vector);
         off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
         hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
                                              current_num_elmts, off_proc_i, off_proc_data);
         /* stash has been communicated; release and reset it */
         hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector), HYPRE_MEMORY_HOST);
         hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector), HYPRE_MEMORY_HOST);
         hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
         hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorGetValuesPar
*
* get a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorGetValuesPar(hypre_IJVector *vector,
                           HYPRE_Int num_values,
                           const HYPRE_BigInt *indices,
                           HYPRE_Complex *values)
{
   HYPRE_Int my_id;
   HYPRE_Int j, k;
   HYPRE_BigInt i, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int ierr = 0;
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Local range; vec_stop is EXCLUSIVE here (no -1), unlike Set/AddToValues */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1];
#else
   vec_start = IJpartitioning[my_id];
   vec_stop = IJpartitioning[my_id+1];
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Determine whether indices points to local indices only, and if not, let
      user know of catastrophe and exit. If indices == NULL, assume that
      num_values components are to be retrieved from block starting at
      vec_start */
   if (indices)
   {
      for (i = 0; i < num_values; i++)
      {
         ierr += (indices[i] < vec_start);
         ierr += (indices[i] >= vec_stop);
      }
   }

   if (ierr)
   {
      if (print_level)
      {
         hypre_printf("indices beyond local range -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Indices specified are unusable ****\n");
      }
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      /* k must be private: it was shared in the old pragma
         ('private(i,j)'), a data race that could copy wrong entries */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j, k) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
      {
         k = (HYPRE_Int)(indices[j] - vec_start);
         values[j] = data[k];
      }
   }
   else
   {
      if (num_values > (HYPRE_Int)(vec_stop-vec_start))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         values[j] = data[j];
   }
   return hypre_error_flag;
}
/******************************************************************************
* hypre_IJVectorAssembleOffProcValsPar
*
* This is for handling set and get values calls to off-proc. entries - it is
* called from assemble. There is an alternate version for when the assumed
* partition is being used.
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_BigInt *off_proc_i,
HYPRE_Complex *off_proc_data)
{
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_ParVector *par_vector = ( hypre_ParVector *) hypre_IJVectorObject(vector);
hypre_MPI_Request *requests = NULL;
hypre_MPI_Status *status = NULL;
HYPRE_Int i, j, j2;
HYPRE_Int iii, indx, ip;
HYPRE_BigInt row, first_index;
HYPRE_Int proc_id, num_procs, my_id;
HYPRE_Int num_sends, num_sends2;
HYPRE_Int num_recvs;
HYPRE_Int num_requests;
HYPRE_Int vec_start, vec_len;
HYPRE_Int *send_procs;
HYPRE_BigInt *send_i;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_procs;
HYPRE_BigInt *recv_i;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *info;
HYPRE_Int *int_buffer;
HYPRE_Int *proc_id_mem;
HYPRE_BigInt *partitioning;
HYPRE_Int *displs;
HYPRE_Int *recv_buf;
HYPRE_Complex *send_data;
HYPRE_Complex *recv_data;
HYPRE_Complex *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
partitioning = hypre_IJVectorPartitioning(vector);
first_index = partitioning[my_id];
/* Phase 1: find the owning process of each stashed row and count how
   many elements go to each destination */
info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
proc_id_mem = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
for (i=0; i < current_num_elmts; i++)
{
row = off_proc_i[i];
proc_id = hypre_FindProc(partitioning,row,num_procs);
proc_id_mem[i] = proc_id;
info[proc_id]++;
}
/* determine send_procs and amount of data to be sent */
num_sends = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
num_sends++;
}
}
/* int_buffer holds (dest proc, count) pairs for this rank's sends */
num_sends2 = 2*num_sends;
send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
int_buffer = hypre_CTAlloc(HYPRE_Int, num_sends2, HYPRE_MEMORY_HOST);
j = 0;
j2 = 0;
send_map_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
send_procs[j++] = i;
send_map_starts[j] = send_map_starts[j-1]+info[i];
int_buffer[j2++] = i;
int_buffer[j2++] = info[i];
}
}
/* Phase 2: all-gather every rank's (dest, count) pairs so each rank can
   work out who will be sending to it (info is reused for recv counts) */
hypre_MPI_Allgather(&num_sends2,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
displs[0] = 0;
for (i=1; i < num_procs+1; i++)
displs[i] = displs[i-1]+info[i-1];
recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(int_buffer,num_sends2,HYPRE_MPI_INT,recv_buf,info,displs,
HYPRE_MPI_INT,comm);
hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(info, HYPRE_MEMORY_HOST);
/* determine recv procs and amount of data to be received */
num_recvs = 0;
for (j=0; j < displs[num_procs]; j+=2)
{
if (recv_buf[j] == my_id)
num_recvs++;
}
recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
j2 = 0;
recv_vec_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
for (j=displs[i]; j < displs[i+1]; j+=2)
{
if (recv_buf[j] == my_id)
{
recv_procs[j2++] = i;
recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];
}
if (j2 == num_recvs) break;
}
}
hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
hypre_TFree(displs, HYPRE_MEMORY_HOST);
/* set up data to be sent to send procs */
/* send_i contains for each send proc
indices, send_data contains corresponding values */
send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_data = hypre_CTAlloc(HYPRE_Complex, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
recv_data = hypre_CTAlloc(HYPRE_Complex, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
/* Phase 3: bucket the stashed (index, value) pairs by destination;
   send_map_starts entries are advanced as insertion cursors ... */
for (i=0; i < current_num_elmts; i++)
{
proc_id = proc_id_mem[i];
indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
iii = send_map_starts[indx];
send_i[iii] = off_proc_i[i];
send_data[iii] = off_proc_data[i];
send_map_starts[indx]++;
}
hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);
/* ... then shifted back down one slot to restore the offsets */
for (i=num_sends; i > 0; i--)
{
send_map_starts[i] = send_map_starts[i-1];
}
send_map_starts[0] = 0;
/* Phase 4: exchange indices (first round) and values (second round) */
num_requests = num_recvs+num_sends;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
/* Phase 5: accumulate the received contributions into the local data */
for (i=0; i < recv_vec_starts[num_recvs]; i++)
{
row = recv_i[i];
j = (HYPRE_Int)(row - first_index);
data[j] += recv_data[i];
}
hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
hypre_TFree(recv_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
#else
/* assumed partition version */
/* Assumed-partition version: assemble (sum) IJVector entries whose rows are
 * owned by other processors.
 *
 * Algorithm:
 *   1. For each off-proc row, compute the *assumed* owner from the global
 *      assumed partition; sort rows (carrying assumed ids and original order).
 *   2. Contact the assumed owners with row ranges to discover the *actual*
 *      owner of each row.
 *   3. Pack, per actual owner, a buffer of (#rows, row, value, row, value, ...)
 *      as raw bytes (so HYPRE_BigInt and HYPRE_Complex can share one buffer),
 *      and exchange with hypre_DataExchangeList.
 *   4. Unpack received (row, value) pairs and add them into the local data.
 *
 * Returns hypre_error_flag.
 *
 * NOTE(review): max_off_proc_elmts is unused in this version - presumably kept
 * for interface symmetry with the non-assumed-partition variant; confirm.
 */
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      HYPRE_Int max_off_proc_elmts,
                                      HYPRE_Int current_num_elmts,
                                      HYPRE_BigInt *off_proc_i,
                                      HYPRE_Complex *off_proc_data)
{
   HYPRE_Int myid;
   HYPRE_BigInt global_first_row, global_num_rows;
   HYPRE_Int i, j, in, k;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_Int ex_num_contacts = 0;
   HYPRE_BigInt range_start, range_end;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_BigInt row;
   HYPRE_Int num_ranges, row_count;
   HYPRE_Int num_recvs;
   HYPRE_Int counter;
   HYPRE_BigInt upper_bound;
   HYPRE_Int num_real_procs;
   HYPRE_BigInt *row_list=NULL;
   HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_Int *recv_starts=NULL;
   HYPRE_BigInt *response_buf = NULL;
   HYPRE_Int *response_buf_starts=NULL;
   HYPRE_Int *num_rows_per_proc = NULL;
   HYPRE_Int tmp_int;
   HYPRE_Int obj_size_bytes, big_int_size, complex_size;
   HYPRE_Int first_index;
   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;
   HYPRE_Complex tmp_complex;
   HYPRE_BigInt *ex_contact_buf=NULL;
   HYPRE_Complex *vector_data;
   HYPRE_Complex value;
   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_IJAssumedPart *apart;

   hypre_MPI_Comm_rank(comm, &myid);
   global_num_rows = hypre_IJVectorGlobalNumRows(vector);
   global_first_row = hypre_IJVectorGlobalFirstRow(vector);

   /* verify that we have created the assumed partition */
   if (hypre_IJVectorAssumedPart(vector) == NULL)
   {
      hypre_IJVectorCreateAssumedPartition(vector);
   }
   apart = (hypre_IJAssumedPart*) hypre_IJVectorAssumedPart(vector);

   /* Step 1: get the assumed processor id for each row */
   a_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   orig_order = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   row_list = hypre_CTAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);

   if (current_num_elmts > 0)
   {
      for (i=0; i < current_num_elmts; i++)
      {
         row = off_proc_i[i];
         row_list[i] = row;
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_rows, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;  /* remember the pre-sort position */
      }

      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also...*/
      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, current_num_elmts -1);

      /* calculate the number of contacts (one per distinct assumed proc) */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i=1; i < current_num_elmts; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }

   /* Step 2: now we will go through and create a contact list - need to
      contact assumed processors and find out who the actual row owner is -
      we will contact with a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
   ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);

   counter = 0;
   range_end = -1;
   for (i=0; i< current_num_elmts; i++)
   {
      /* only start a new contact when the row falls outside the assumed
         owner's range that covered the previous rows */
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];

         /* end of prev. range */
         if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];

         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter*2;
         ex_contact_buf[counter*2] = row_list[i];
         counter++;

         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_row,
                                           global_num_rows, &range_start, &range_end);
      }
   }

   /*finish the starts*/
   ex_contact_vec_starts[counter] = counter*2;
   /*finish the last range*/
   if (counter > 0)
      ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];

   /* create response object - can use same fill response as used in the
      commpkg routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;

   max_response_size = 6; /* 6 means we can fit 3 ranges*/

   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
                          sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 4,
                          comm, (void**) &response_buf, &response_buf_starts);

   /* now response_buf contains a proc_id followed by an upper bound for the
      range. */
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);

   hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
   a_proc_id = NULL;

   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges/2;

   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;

   /* loop through ranges - create a list of actual processor ids*/
   for (i=0; i<num_ranges; i++)
   {
      upper_bound = response_buf[i*2+1];
      counter = 0;
      tmp_id = (HYPRE_Int)response_buf[i*2];

      /* loop through row_list entries - counting how many are in the range */
      while (j < current_num_elmts && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      /* count a new distinct owner only if this range matched any rows */
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }

   /* Step 3: now we have the list of real processor ids (real_proc_id) - and
      the number of distinct ones - so now we can set up data to be sent - we
      have HYPRE_Int and HYPRE_Complex data. (row number and value) - we will
      send everything as a void since we may not know the rel sizes of ints
      and doubles */

   /* first find out how many elements to send per proc - so we can do
      storage */
   complex_size = sizeof(HYPRE_Complex);
   big_int_size = sizeof(HYPRE_BigInt);

   /* pack with a uniform slot size so ints and complexes interleave safely */
   obj_size_bytes = hypre_max(big_int_size, complex_size);

   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);

   counter = 0;

   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;

      /* loop through real procs - these are sorted (row_list is sorted also)*/
      for (i=1; i < current_num_elmts; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /*another row */
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
         }
      }
   }

   /* calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts[0] = -1;

   for (i=0; i < num_real_procs; i++)
   {
      /* 1 header slot (#rows) + 2 slots (row, value) per element */
      storage += 1 + 2* num_rows_per_proc[i];
      ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
   }

   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
   index_ptr = void_contact_buf; /* step through with this index */

   /* set up data to be sent to send procs */
   /* for each proc, the buffer contains #rows, row #, data, etc. */

   /* un-sort real_proc_id - we want to access data arrays in order */
   us_real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   for (i=0; i < current_num_elmts; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);

   prev_id = -1;
   for (i=0; i < current_num_elmts; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row_list[i] - you lose the negative signs that differentiate
         add/set values */
      row = off_proc_i[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in = ex_contact_vec_starts[indx];

      index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

      /* first time for this processor - add the number of rows to the buffer
         (a negative start encodes "header not yet written") */
      if (in < 0)
      {
         in = -in - 1;
         /* re-calc. index_ptr since in was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

         tmp_int = num_rows_per_proc[indx];
         hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);

         in++;
      }
      /* add row # */
      hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* add value */
      tmp_complex = off_proc_data[i];
      hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* increment the indexes to keep track of where we are - fix later */
      ex_contact_vec_starts[indx] = in;
   }

   /* some clean up */
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
   hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list, HYPRE_MEMORY_HOST);
   hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);

   /* during packing each start was advanced to the end of its proc's data, so
      shifting right by one turns them back into proper starts (starts[i] =
      end of proc i-1 = start of proc i) */
   for (i=num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
   }

   ex_contact_vec_starts[0] = 0;

   /* now send the data */
   /***********************************/

   /* now get the info in send_proc_obj */
   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;

   /*build the response object*/
   /* use the send_proc_obj for the info kept from contacts */
   /*estimate inital storage allocation */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id = NULL; /* don't care who sent it to us */
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);

   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;

   max_response_size = 0;

   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 5,
                          comm, (void **) &response_buf, &response_buf_starts);

   /***********************************/

   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);

   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);

   /* Step 4: Now we can unpack the send_proc_objects and either set or add to
      the vector data */
   num_recvs = send_proc_obj.length;

   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;
   vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
   first_index = hypre_ParVectorFirstIndex(par_vector);

   for (i=0; i < num_recvs; i++)
   {
      indx = recv_starts[i];

      /* get the number of rows for this recv */
      hypre_TMemcpy( &row_count, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;

      for (j=0; j < row_count; j++) /* for each row: unpack info */
      {
         /* row # */
         hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* value */
         hypre_TMemcpy( &value, recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* NOTE(review): local offset computed as row - first_index -
            global_first_row; this assumes first_index is relative to
            global_first_row - confirm against hypre_ParVectorFirstIndex */
         k = (HYPRE_Int)(row - first_index - global_first_row);
         vector_data[k] += value;
      }
   }

   hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
#endif
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Return the id of the current OS process (Windows implementation). */
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
/*! \brief Return the id of the current OS process (POSIX implementation). */
inline size_t current_process_id() { return getpid(); }
#endif
/*!
 * \brief Kernel that validates a CSR indptr array.
 *
 * A valid indptr is non-negative, non-decreasing, starts with 0 and ends with
 * the number of indices. Writes kCSRIndPtrErr to *out on violation; called
 * once per row i in [0, end).
 */
struct csr_indptr_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    const bool negative_or_decreasing = indptr[i+1] < 0 || indptr[i+1] < indptr[i];
    const bool bad_first = (i == 0) && indptr[i] != 0;
    const bool bad_last = (i == end - 1) && indptr[end] != idx_size;
    if (negative_or_decreasing || bad_first || bad_last) {
      *out = kCSRIndPtrErr;
    }
  }
};
/*!
 * \brief Kernel that validates the column indices of one CSR row.
 *
 * Indices must be non-negative, less than the number of columns, and strictly
 * ascending within the row. Writes kCSRIdxErr to *out on the first violation.
 */
struct csr_idx_check {
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    const RType row_begin = indptr[i];
    const RType row_end = indptr[i+1];
    for (RType j = row_begin; j < row_end; ++j) {
      const bool out_of_range = idx[j] >= ncols || idx[j] < 0;
      const bool not_ascending = (j + 1 < row_end) && idx[j] >= idx[j+1];
      if (out_of_range || not_ascending) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
 * \brief Kernel that validates the row indices of a row-sparse NDArray.
 *
 * Indices must be non-negative, less than the size of the first dimension,
 * and strictly ascending. Writes kRSPIdxErr to *out on violation; called once
 * per index i, with end = last valid index position.
 */
struct rsp_idx_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    const bool out_of_bound = idx[i] < 0 || idx[i] >= nrows;
    const bool not_ascending = (i < end) && idx[i+1] <= idx[i];
    if (out_of_bound || not_ascending) {
      *out = kRSPIdxErr;
    }
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
 * \brief Check the validity of a CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Scalar blob on CPU that receives the error code
 *        (kCSRShapeErr / kCSRIndPtrErr / kCSRIdxErr, or left untouched /
 *        set to kNormalErr when the array is valid).
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: 2-D array, 1-D aux arrays, |indptr| == rows + 1,
  // and as many indices as stored values.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check, run on the array's own device, then copied back.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // Temporary 1-element array on the target device holding the error flag.
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
                                                input.aux_data(csr::kIndPtr).dptr<RType>(),
                                                indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
                                               input.aux_data(csr::kIdx).dptr<IType>(),
                                               input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
 * \brief Check the validity of a RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Scalar blob on CPU that receives the error code
 *        (kRSPShapeErr / kRSPIdxErr, or kNormalErr when valid).
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: one row index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // An empty index array is trivially valid.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) content check on the array's own device, then copied back to CPU.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
                                           val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
                                           idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch a format check based on the input's storage type.
 *
 * CSR and row-sparse arrays are forwarded to their dedicated checkers;
 * dense arrays need no check; any other storage type is fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // no-op for default storage
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2'. Sets *has_both when both types occur.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (vstorage.empty()) {
    return false;
  }
  uint8_t seen = 0;  // bit 0: stype1 seen, bit 1: stype2 seen
  for (const auto s : vstorage) {
    if (s == stype1) {
      seen |= 1;
    } else if (s == stype2) {
      seen |= 2;
    } else {
      return false;
    }
  }
  if (has_both) {
    *has_both = (seen == 3);
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as targets `stype1` or `stype2`. Sets *has_both when both occur.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (ndarrays.empty()) {
    return false;
  }
  uint8_t seen = 0;  // bit 0: stype1 seen, bit 1: stype2 seen
  for (const auto& nd : ndarrays) {
    const NDArrayStorageType stype = nd.storage_type();
    if (stype == stype1) {
      seen |= 1;
    } else if (stype == stype2) {
      seen |= 2;
    } else {
      return false;
    }
  }
  if (has_both) {
    *has_both = (seen == 3);
  }
  return true;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  return std::any_of(ndstypes.begin(), ndstypes.end(),
                     [stype](const int ndstype) { return ndstype == stype; });
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  const char* name = "unknown";
  switch (x) {
    case kDefaultStorage:   name = "default"; break;
    case kCSRStorage:       name = "csr"; break;
    case kRowSparseStorage: name = "row_sparse"; break;
  }
  return name;
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  const char* name = "unknown";
  switch (dev_type) {
    case Context::kCPU:       name = "cpu"; break;
    case Context::kGPU:       name = "gpu"; break;
    case Context::kCPUPinned: name = "cpu_pinned"; break;
    case Context::kCPUShared: name = "cpu_shared"; break;
  }
  return name;
}
/*! \brief Return the value of `attr_name` in the node's attribute dict,
 * or `default_val` when the attribute is absent.
 *
 * Uses a single map lookup (the original did find() followed by at(),
 * hashing/searching the key twice).
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  const auto it = attrs.dict.find(attr_name);
  return it == attrs.dict.end() ? default_val : it->second;
}
/*! \brief get string representation of the operator: name, input/output
 * storage types, parameter dict and device mask - used in fallback logs.
 *
 * Fix: iterate attrs.dict by const reference; the original `for (auto kv : ...)`
 * copied every key/value pair per iteration.
 */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator, derived from its runtime
 * inputs/outputs.
 * \param req unused here; kept for call-site compatibility.
 *
 * Fix: the transform lambda took `const NDArray` by value, copying every
 * array (refcount churn) just to read its storage type - now takes const&.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once per thread. Intended for storage fallback warning messages.
 *
 * Fix: use the bool returned by insert() instead of find() + insert(), which
 * hashed and searched the set twice per call.
 */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  // insert().second is true only when the message was not seen before.
  if (log_store->insert(message).second) {
    LOG(INFO) << message;
  }
}
/*! \brief log storage fallback event (at most once per distinct operator
 * description), unless disabled via MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
 * \param attrs attributes of the operator that fell back
 * \param dev_mask device mask of the execution context
 * \param in_attrs input storage types (must be non-null)
 * \param out_attrs output storage types (must be non-null)
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Environment variable is read once per process.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  // Deduplicated per thread: identical fallback messages are logged once.
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}
// heuristic to determine the number of worker threads per GPU
inline int GetNumThreadsPerGPU() {
  // Resource-efficient default; override with MXNET_GPU_WORKER_NTHREADS.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Resource-efficient default, capped by the per-GPU worker thread count.
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the n elements of array a into start, using an OpenMP
 * parallel-for reduction when OpenMP is enabled.
 * \return start plus the sum of a[0..n).
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V acc = start;
#pragma omp parallel for reduction(+:acc)
  for (int idx = 0; idx < n; ++idx) {
    acc += a[idx];
  }
  return acc;
}
/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 *
 * Recursively splits [first, first+len) in half, sorting one half on a new
 * thread and the other on the current thread, until a half is smaller than
 * grainsize (then plain std::sort). Halves are combined with inplace_merge.
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first+len, comp);
  } else {
    // Left half on a spawned thread, right half recursively on this thread.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto total = std::distance(first, last);
  // Grain size: at least 16K elements per task, or an even split plus slack.
  const size_t grain = std::max(total / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, total, grain, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Helper for non-array type `T`: exposes only SingleObject, so the
 * single-object MakeUnique overload participates in overload resolution.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief unique_ptr type for a single object of type `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T[]`: exposes only
 * UnknownBound, selecting the array MakeUnique overload.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief unique_ptr type for an array of `T` of unknown bound.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T[kSize]`: exposes only
 * KnownBound (= void), which routes to the deleted MakeUnique overload.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Marker type; known-bound arrays are not constructible here.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 * `std::unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 * constructed.
 * \return `std::unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
 * \brief Constructs an array of type `T` and wraps it in a
 * `std::unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std::unique_ptr` owning a value-initialized array of `n` elements.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // `{}` value-initializes every element (zero for scalar types).
  return std::unique_ptr<T>(new U[n]{});
}
/*!
 * \brief Deleted overload: constructing an array of known bound (e.g.
 * `MakeUnique<int[5]>(...)`) is disallowed, mirroring `std::make_unique`.
 * \param args List of arguments (never usable; the overload is deleted).
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Look up the FCompType attribute registered for `op` under
 * `name + "<cpu>"` or `name + "<gpu>"` depending on the context's device.
 * \return the registered functor, or nullptr if none; LOG(FATAL) on an
 * unknown device mask.
 *
 * NOTE(review): the registry references are function-local statics, so for a
 * given FCompType instantiation they bind to the `name` of the FIRST call;
 * callers appear to always pass the same name per FCompType - confirm.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 *
 * Integral types represent every value up to their max exactly; floating-point
 * types are exact up to 2^digits, where digits is the significand bit count.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value
             ? static_cast<size_t>(std::numeric_limits<T>::max())
             : size_t(1) << std::numeric_limits<T>::digits;
}
// fp16 specialization: 2^11 = 2048, matching half's 11 significand bits
// (10 stored + 1 implicit) - consistent with the primary template's formula.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// bfloat16 specialization: 2^15 = 32768.
// NOTE(review): bf16 has an 8-bit significand, so exact integers only reach
// 2^8; this cutoff is much larger - confirm it is intentional.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}
// Position of the highest set bit of `a`, 1-based: returns
// floor(log2(a)) + 1 for a > 0, and 1 for a == 0 (despite the "ilog2" name).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Unsigned-int variant of ilog2ul: returns floor(log2(a)) + 1 for a > 0,
// and 1 for a == 0.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
/*!
 * \brief Return an NDArray of all zeros.
 *
 * Dense arrays are allocated eagerly and filled with zero; sparse arrays are
 * created with delayed storage allocation (an empty sparse array is all zero).
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    return NDArray(stype, shape, ctx, true, dtype);
  }
  // NDArray with default storage
  NDArray ret(shape, ctx, false, dtype);
  ret = 0;
  return ret;
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
const Context &ctx, const int dtype,
std::vector<NDArray> *vec) {
// NDArray with default storage
if (stype == kDefaultStorage) {
vec->emplace_back(shape, ctx, false, dtype);
vec->back() = 0;
} else {
// NDArray with non-default storage. Storage allocation is always delayed.
vec->emplace_back(stype, shape, ctx, true, dtype);
}
}
/*!
* \brief parallelize copy by OpenMP.
*/
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
std::memcpy(dst, src, sizeof(DType) * size);
}
}
/*!
 * \brief parallelize add by OpenMP
*/
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
if (shape->ndim() == 0) { // legacy shape ndim = 0 means unknown
*shape = mxnet::TShape(); // unknown shape ndim = -1
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if ((*shape)[j] == 0) { // legacy shape dim_size = 0 means unknown
(*shape)[j] = -1; // unknown dim size = -1
}
}
}
}
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToNumpyShape(&(shapes->at(i)));
}
}
/*!
 * \brief This function is used to convert shapes returned by
* the infer shape functions/pass to the legacy shape definition.
*/
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
if (!mxnet::ndim_is_known(*shape)) {
*shape = mxnet::TShape(0, -1);
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if (!mxnet::dim_size_is_known(*shape, j)) {
(*shape)[j] = 0;
}
}
}
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToLegacyShape(&(shapes->at(i)));
}
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief This function returns the output name of a NodeEntry.
*/
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
nnvm::Symbol sym;
sym.outputs.push_back(e);
return sym.ListOutputNames()[0];
}
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
// convert negative axes to positive values
const int ndim = src.ndim();
mxnet::TShape axes = src;
for (int i = 0; i < ndim; ++i) {
if (axes[i] < 0) {
axes[i] += ndim;
}
CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
<< axes[i] << " exceeds the range ["
<< 0 << ", " << ndim << ")";
}
return axes;
}
inline bool is_float(const int dtype) {
return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16;
}
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2) return type1;
if (is_float(type1) && is_float(type2)) {
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
return is_float(type1) ? type1 : type2;
}
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
inline int np_binary_out_infer_type(const int type1, const int type2) {
if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
return mshadow::kInt32;
}
return get_more_precise_type(type1, type2);
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64. whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bit to represent a
/// value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 8;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 8;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
/// Allocate from the ASTContext's allocator (defined out of line).
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
/// Convenience overload taking the ASTContext by pointer; forwards to the
/// by-reference form.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
/// Placement new: construct in caller-provided storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// The matching deletes are intentionally no-ops: storage comes from the
// ASTContext (or caller-provided memory) and is never freed per-node.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
/// Dereference casts the underlying Stmt* to T*; null pointers are
/// passed through unchanged (cast_or_null).
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmt has no default state and is neither copyable nor movable; nodes
// are referenced by pointer throughout the AST.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
/// Construct a statement of the given class, initializing the common
/// bitfields and recording per-class statistics when enabled.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// The dynamic class of this statement, as recorded in the shared
/// bitfields at construction.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
/// Printable name of this statement's class (defined out of line).
const char *getStmtClassName() const;
/// Whether this statement is flagged as an OpenMP structured block.
bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
// Forward to the non-const overload, which holds the implementation.
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
/// Strip label-like wrapper statements; the const overload (defined out
/// of line) holds the implementation.
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
/// Range over this node's direct children; each concrete subclass
/// provides the non-const implementation.
child_range children();
const_child_range children() const {
// Reuse the non-const implementation; the range is re-wrapped as const.
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The declaration group this statement adapts.
  DeclGroupRef DG;
  /// Source extent of the declaration statement.
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  // NOTE(review): dropped the meaningless top-level const on the by-value
  // return (clang-tidy readability-const-return-type); callers are
  // unaffected since DeclGroupRef is returned by value.
  DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions (initializers of the declared decls).
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  /// Iteration over the declarations themselves (rather than children).
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
/// Build a null statement whose ";" is at \p L; \p hasLeadingEmptyMacro
/// records whether an empty macro expansion preceded the semicolon.
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
/// Location of the ";" (stored in the shared bitfields).
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// A null statement begins and ends at its semicolon.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// NullStmt has no children; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
/// Create a compound statement; the Stmt* array is stored as trailing
/// objects after the node.
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
/// Number of statements in the body (stored in the shared bitfields).
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
/// First/last statement of the body, or null when the body is empty.
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
// Scan backwards for the last non-null-statement.
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
/// Intrusive singly-linked list of the enclosing switch's cases.
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
/// Dispatches to CaseStmt/DefaultStmt; defined out of line, after both
/// subclasses are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are
// optional. Note that it would be more convenient to put the optional
// trailing objects at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allows ranges in case statements of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Offsets into the trailing Stmt* array; RHS/substmt shift by one when
// the optional GNU-range RHS is present.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
// LHS/RHS/substmt are stored as Stmt* in the trailing array and cast to
// Expr* at the accessor boundary.
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
class DefaultStmt : public SwitchCase {
/// The substatement following the "default:" label; dereferenced by
/// getEndLoc(), so expected non-null once fully built.
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
/// The "default" keyword location is stored in the SwitchCase bitfields.
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
SourceLocation SwitchCase::getEndLoc() const {
  // Defined here, after CaseStmt and DefaultStmt, so their getEndLoc()
  // implementations are visible for the dispatch below.
  if (isa<CaseStmt>(this))
    return cast<CaseStmt>(this)->getEndLoc();
  if (isa<DefaultStmt>(this))
    return cast<DefaultStmt>(this)->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
  // Dispatch on the concrete subclass (defined here so both are complete).
  if (isa<CaseStmt>(this))
    return cast<CaseStmt>(this)->getSubStmt();
  if (isa<DefaultStmt>(this))
    return cast<DefaultStmt>(this)->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
/// The declaration this label refers to.
LabelDecl *TheDecl;
/// The labeled statement.
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
/// Location of the label identifier (stored in the shared bitfields).
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
/// The label's name (defined out of line).
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The statement the attributes apply to.
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
/// The Attr* array lives in trailing storage after the node.
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which are
// optional. Note that it would be more convenient to put the optional
// trailing objects at the end but this would change the order of the
// children. The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
// Offsets into the trailing Stmt* array; later slots shift by one for
// each optional object that is present.
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
// The condition is stored as a Stmt* in the trailing array and cast to
// Expr* at the accessor boundary.
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
/// The else branch, or null when no else storage is allocated.
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
/// The init statement (C++17 if-with-initializer), or null when no init
/// storage is allocated.
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
/// Location of the "else" keyword, stored as an optional trailing object.
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };  // condition + body

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets of the individual slots within the trailing "Stmt *" array;
  // optional slots shift the later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition slot holds an Expr* stored as a Stmt*; the casts below
  // recover the static type.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Return the init statement, or nullptr when none was allocated.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  /// Replace the init statement; asserts that init storage exists.
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  /// Head of the intrusive linked list of case/default statements.
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  /// Convenience setter: body and 'switch' keyword location in one call.
  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  /// Prepend a case/default to the list; the case must not already be linked
  /// into another switch.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  // Falls back to the condition's end when there is no body (empty shell).
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators — range over every trailing "Stmt *" slot that was allocated.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };  // condition + body

  // Offsets within the trailing "Stmt *" array; the optional condition
  // variable slot shifts the later ones.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition slot holds an Expr* stored as a Stmt*.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The 'while' keyword location lives in the Stmt bit-fields.
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators — range over every allocated trailing "Stmt *" slot
  // (condition-variable DeclStmt when present, condition, body).
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
///
/// Unlike WhileStmt, a do/while cannot declare a condition variable, so the
/// two subexpressions are kept in a plain fixed-size array rather than
/// trailing objects.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition slot holds an Expr* stored as a Stmt*.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The 'do' keyword location is packed into the Stmt bit-fields.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators — body first, then condition (array order).
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Fixed slots; any entry may be null. CONDVAR holds the faux DeclStmt for
  // a condition variable ("for (...; int y = f(); ...)"), when present.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // COND and INC slots hold Expr* stored as Stmt*; may be null.
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The 'for' keyword location is packed into the Stmt bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators — all five slots, including nulls for omitted parts.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;       // The target label; a decl reference, not a child.
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location is packed into the Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators — a direct goto has no subexpressions.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  // Stored as a Stmt* (rather than Expr*) so children() can hand out a
  // range over it directly.
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators — the single child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is packed into the Stmt bit-fields; the node has
  // no other state.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators — no subexpressions.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location is packed into the Stmt bit-fields; the node has
  // no other state.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators — no subexpressions.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value slot holds an Expr* stored as a Stmt*; may be null for
  // a bare "return;".
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  // The 'return' keyword location is packed into the Stmt bit-fields.
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators — the optional return value is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, owned/populated by the derived classes:
  // outputs first, then inputs (see begin_inputs/begin_outputs below).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Derived classes provide the real locations; the base returns invalid
  // locations.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators — inputs are stored after the outputs in Exprs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }
  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators — outputs occupy the front of Exprs.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }
  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  // Note: children() covers outputs and inputs only; any trailing entries
  // (e.g. GCC asm-goto labels) are not included here.
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
///
/// Operand bookkeeping: the Names/Constraints/Exprs arrays are laid out as
/// [outputs][inputs][labels], so an index into one of the later groups must
/// be offset by the sizes of the groups before it.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs follow the outputs in Names/Constraints/Exprs.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  /// True if this is an "asm goto" — a GCC extension whose asm may transfer
  /// control to one or more listed labels.
  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  // Labels follow BOTH the outputs and the inputs in the Names and Exprs
  // arrays, so their index must be offset by both counts. (Indexing with
  // only NumInputs happened to work while Sema rejected asm goto with
  // outputs — NumOutputs was always zero on this path — but it silently
  // misindexes as soon as outputs are permitted.)
  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
/// The raw assembly string, with symbolic operands already expanded.
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
/// Constraint strings for all operands, outputs first then inputs.
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True when a brace location was recorded, i.e. `__asm { ... }` form.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
/// Input constraints are stored after the output constraints.
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
/// Deserialization helper: copy all operand data into ASTContext storage.
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a `__except` handler of a Structured Exception Handling
/// `__try` statement: a filter expression plus the handler block.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
/// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
/// Build an empty statement for deserialization.
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a `__finally` block of a Structured Exception Handling
/// `__try` statement.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
/// Build an empty statement for deserialization.
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a `__try` (or C++ `try` lowered to SEH) statement together
/// with its handler, which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
/// Children[TRY] is the guarded block, Children[HANDLER] the handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
/// Build an empty statement for deserialization.
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators: __leave has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the outlined function and the int part is the
/// captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
/// Trailing storage: NumCaptures init exprs followed by the captured stmt.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
omp_for_lastprivate.c | // RUN: %libomp-compile-and-run
// REQUIRES: !(abt && (clang || gcc))
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int sum0;
#pragma omp threadprivate(sum0)
/* Verify lastprivate(i0) on a worksharing loop: after the loop, i0 must hold
 * the value assigned in the sequentially last iteration (i.e. LOOPCOUNT),
 * and the threadprivate partial sums must add up to sum(1..LOOPCOUNT).
 * Returns nonzero on success. */
int test_omp_for_lastprivate()
{
int sum = 0;
int known_sum;
int i0;
i0 = -1; /* sentinel; must be overwritten by lastprivate */
#pragma omp parallel
{
sum0 = 0; /* threadprivate per-thread partial sum */
{ /* Begin of orphaned block */
int i;
#pragma omp for schedule(static,7) lastprivate(i0)
for (i = 1; i <= LOOPCOUNT; i++) {
sum0 = sum0 + i;
i0 = i;
} /* end of for */
} /* end of orphaned block */
#pragma omp critical
{
sum = sum + sum0;
} /* end of critical */
} /* end of parallel */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
fprintf(stderr, "known_sum = %d , sum = %d\n",known_sum,sum);
fprintf(stderr, "LOOPCOUNT = %d , i0 = %d\n",LOOPCOUNT,i0);
return ((known_sum == sum) && (i0 == LOOPCOUNT));
}
/* Driver: repeat the lastprivate test REPETITIONS times and return the
 * number of failing runs (0 means success). */
int main()
{
    int failures = 0;
    for (int rep = 0; rep < REPETITIONS; rep++) {
        failures += test_omp_for_lastprivate() ? 0 : 1;
    }
    return failures;
}
|
task_nested_omp.c |
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define NUM_TASKS 50000
#define NUM_REPS 1
int o = 0;
/* Scale the float pointed to by a in place by the given factor. */
void sscal(float value, float *a)
{
    *a *= value;
}
/* Record one invocation in the global counter o; the value argument is
 * deliberately unused. */
void na(float value)
{
    (void)value;
    o += 1;
}
/* Spawn two nested child tasks: one scales *a by value via sscal, the other
 * bumps the global invocation counter via na. Neither task is waited for
 * here; completion is observed at the enclosing parallel region's barrier. */
void presscal(float value, float *a)
{
#pragma omp task
{
sscal(value, a);
}
#pragma omp task
{
na(value);
}
}
/* Task-nesting microbenchmark: creates ntasks top-level tasks, each of which
 * spawns two nested tasks (scale a[i] and bump the global counter o), then
 * verifies the results and reports timing.
 * argv[1] (optional): number of tasks; argv[2] (optional): repetitions.
 * Returns EXIT_FAILURE if any element failed verification or allocation
 * failed, EXIT_SUCCESS otherwise.
 * Fixes vs. original: nthreads initialized, malloc results checked,
 * time/a freed, and verification failures now affect the exit status. */
int main(int argc, char *argv[])
{
    int i, r, nthreads = 1;
    double *time, avg_time = 0.0;
    char *str = NULL, *endptr;
    float *a;
    double time2 = 0.0;
    int nerrors = 0;
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }
    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    if (!time || !a) {
        free(time);
        free(a);
        return EXIT_FAILURE;
    }
    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }
    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                /* time2 measures task creation only, on the producer thread. */
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    {
                        presscal(0.9f, &a[i]);
                    }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    // TODO: Just works with one repetition
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i,
                   a[i], (i + 100.0f) * 0.9f);
            nerrors++;
        }
    }
    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    printf("o=%d deberia valer %d\n", o, ntasks);
    free(time);
    free(a);
    return nerrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
bli_trsm_simd_ref.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
#if 1
// An implementation that attempts to facilitate emission of vectorized
// instructions via constant loop bounds + #pragma omp simd directives.
// Lower-triangular TRSM micro-kernel generator. A is the packed mr x mr
// triangular block (diagonal entries stored pre-inverted, see NOTE below)
// and B the packed mr x nr right-hand side; the solved panel is written to
// C and stored back into B. Comments are added only outside the #define:
// '//' inside a backslash-continued macro would splice away following lines.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
const inc_t rs_a = 1; \
const inc_t cs_a = mr; \
\
const inc_t rs_b = nr; \
const inc_t cs_b = 1; \
\
PRAGMA_SIMD \
for ( dim_t i = 0; i < mr; ++i ) \
{ \
/* b1 = b1 - a10t * B0; */ \
/* b1 = b1 / alpha11; */ \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
ctype beta11c = b[i*rs_b + j*cs_b]; \
ctype rho11; \
\
/* beta11 = beta11 - a10t * b01; */ \
PASTEMAC(ch,set0s)( rho11 ); \
for ( dim_t l = 0; l < i; ++l ) \
{ \
PASTEMAC(ch,axpys)( a[i*rs_a + l*cs_a], \
b[l*rs_b + j*cs_b], rho11 ); \
} \
PASTEMAC(ch,subs)( rho11, beta11c ); \
\
/* beta11 = beta11 / alpha11; */ \
/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead
of alpha11, so we can multiply rather than divide. We store
the inverse of alpha11 intentionally to avoid expensive
division instructions within the micro-kernel. */ \
PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
/* Output final result to matrix c. */ \
PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
/* Store the local value back to b11. */ \
PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
} \
} \
}
//INSERT_GENTFUNC_BASIC2( trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
// Instantiate the lower-triangular kernel for each datatype with its
// (mr, nr) register blocking.
GENTFUNC( float, s, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )
// Upper-triangular TRSM micro-kernel generator. Same layout as the lower
// kernel above, but rows are processed bottom-up (i = mr - iter - 1) and the
// update sums over the already-solved rows below the diagonal row i.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
const inc_t rs_a = 1; \
const inc_t cs_a = mr; \
\
const inc_t rs_b = nr; \
const inc_t cs_b = 1; \
\
PRAGMA_SIMD \
for ( dim_t iter = 0; iter < mr; ++iter ) \
{ \
dim_t i = mr - iter - 1; \
\
/* b1 = b1 - a12t * B2; */ \
/* b1 = b1 / alpha11; */ \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
ctype beta11c = b[i*rs_b + j*cs_b]; \
ctype rho11; \
\
/* beta11 = beta11 - a12t * b21; */ \
PASTEMAC(ch,set0s)( rho11 ); \
for ( dim_t l = 0; l < iter; ++l ) \
{ \
PASTEMAC(ch,axpys)( a[i*rs_a + (i+1+l)*cs_a], \
b[(i+1+l)*rs_b + j*cs_b], rho11 ); \
} \
PASTEMAC(ch,subs)( rho11, beta11c ); \
\
/* beta11 = beta11 / alpha11; */ \
/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead
of alpha11, so we can multiply rather than divide. We store
the inverse of alpha11 intentionally to avoid expensive
division instructions within the micro-kernel. */ \
PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
/* Output final result to matrix c. */ \
PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
/* Store the local value back to b11. */ \
PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
} \
} \
}
//INSERT_GENTFUNC_BASIC2( trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
// Instantiate the upper-triangular kernel for each datatype with its
// (mr, nr) register blocking.
GENTFUNC( float, s, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )
#else
#endif
|
for_simd.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// XFAIL: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>
/* OMPT test: the simd worksharing loop below ends in an implicit barrier;
 * FileCheck verifies the barrier begin/end and wait events emitted for it.
 * The CHECK lines are functional FileCheck directives and must not change. */
int main()
{
int y[] = {0,1,2,3};
int i;
#pragma omp for simd
for (i = 0; i < 4; i++)
{
y[i]++;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread implicit barrier at simd loop end
// CxxHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CxxHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CxxHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CxxHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{NULL}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{NULL}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{NULL}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{NULL}}
return 0;
}
|
volumeramprecision.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2013-2018 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_VOLUMERAMPRECISION_H
#define IVW_VOLUMERAMPRECISION_H
#include <inviwo/core/datastructures/volume/volumeram.h>
#include <inviwo/core/datastructures/volume/volumeramhistogram.h>
#include <inviwo/core/util/glm.h>
#include <inviwo/core/util/stdextensions.h>
#include <inviwo/core/datastructures/volume/volume.h>
namespace inviwo {
/**
* \ingroup datastructures
*/
// A VolumeRAM backed by a contiguous buffer of voxels of type T.
// The buffer is held in data_; ownsDataPtr_ records whether this object
// owns it (see removeDataOwnership and the destructor's release()).
template <typename T>
class VolumeRAMPrecision : public VolumeRAM {
public:
using type = T;
// Allocates a zero-initialized x*y*z buffer.
VolumeRAMPrecision(size3_t dimensions = size3_t(128, 128, 128));
// Takes ownership of data (must hold x*y*z elements); allocates if null.
VolumeRAMPrecision(T* data, size3_t dimensions = size3_t(128, 128, 128));
VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs);
VolumeRAMPrecision<T>& operator=(const VolumeRAMPrecision<T>& that);
virtual VolumeRAMPrecision<T>* clone() const override;
virtual ~VolumeRAMPrecision();
// Typed access to the raw voxel buffer.
T* getDataTyped();
const T* getDataTyped() const;
virtual void* getData() override;
virtual const void* getData() const override;
virtual void* getData(size_t) override;
virtual const void* getData(size_t) const override;
virtual void setData(void* data, size3_t dimensions) override;
virtual void removeDataOwnership() override;
virtual const size3_t& getDimensions() const override;
virtual void setDimensions(size3_t dimensions) override;
virtual bool hasHistograms() const override;
virtual HistogramContainer* getHistograms(size_t bins = 2048u,
size3_t sampleRate = size3_t(1)) override;
virtual const HistogramContainer* getHistograms(size_t bins = 2048u,
size3_t sampleRate = size3_t(1)) const override;
virtual void calculateHistograms(size_t bins, size3_t sampleRate,
const bool& stop) const override;
// Per-voxel accessors converting between T and double precision.
virtual double getAsDouble(const size3_t& pos) const override;
virtual dvec2 getAsDVec2(const size3_t& pos) const override;
virtual dvec3 getAsDVec3(const size3_t& pos) const override;
virtual dvec4 getAsDVec4(const size3_t& pos) const override;
virtual void setFromDouble(const size3_t& pos, double val) override;
virtual void setFromDVec2(const size3_t& pos, dvec2 val) override;
virtual void setFromDVec3(const size3_t& pos, dvec3 val) override;
virtual void setFromDVec4(const size3_t& pos, dvec4 val) override;
virtual double getAsNormalizedDouble(const size3_t& pos) const override;
virtual dvec2 getAsNormalizedDVec2(const size3_t& pos) const override;
virtual dvec3 getAsNormalizedDVec3(const size3_t& pos) const override;
virtual dvec4 getAsNormalizedDVec4(const size3_t& pos) const override;
virtual void setFromNormalizedDouble(const size3_t& pos, double val) override;
virtual void setFromNormalizedDVec2(const size3_t& pos, dvec2 val) override;
virtual void setFromNormalizedDVec3(const size3_t& pos, dvec3 val) override;
virtual void setFromNormalizedDVec4(const size3_t& pos, dvec4 val) override;
void setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize,
const size3_t& subOffset) override;
virtual size_t getNumberOfBytes() const override;
private:
size3_t dimensions_;
// True when data_ really owns its buffer; cleared by removeDataOwnership.
bool ownsDataPtr_;
std::unique_ptr<T[]> data_;
// Lazily computed histograms; mutable so const getters can fill the cache.
mutable HistogramContainer histCont_;
};
/**
* Factory for volumes.
* Creates a VolumeRAM with the data type specified by format.
*
* @param dimensions of volume to create.
* @param format of volume to create.
* @param dataPtr optional pointer to data to be handed into the volume.
* @return nullptr if no valid format was specified.
*/
IVW_CORE_API std::shared_ptr<VolumeRAM> createVolumeRAM(const size3_t& dimensions,
const DataFormatBase* format,
void* dataPtr = nullptr);
// Allocate a zero-initialized volume of the given dimensions.
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(size3_t dimensions)
: VolumeRAM(DataFormat<T>::get())
, dimensions_(dimensions)
, ownsDataPtr_(true)
, data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {}
// Take ownership of an existing buffer (assumed to hold x*y*z elements —
// the caller must guarantee this); if data is null, allocate a
// zero-initialized buffer instead.
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(T* data, size3_t dimensions)
: VolumeRAM(DataFormat<T>::get())
, dimensions_(dimensions)
, ownsDataPtr_(true)
, data_(data ? data : new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {}
// Deep-copy constructor: allocates a fresh buffer and memcpys the voxels,
// so the copy always owns its data regardless of rhs's ownership.
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs)
: VolumeRAM(rhs)
, dimensions_(rhs.dimensions_)
, ownsDataPtr_(true)
, data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]) {
std::memcpy(data_.get(), rhs.data_.get(),
dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T));
}
// Copy assignment: deep-copies that's buffer into a fresh allocation and
// swaps it in. Mirrors setData/setDimensions: if we did NOT own the previous
// buffer, the old pointer (now held by the local 'data') must be released so
// its destructor does not delete memory owned elsewhere.
template <typename T>
VolumeRAMPrecision<T>& VolumeRAMPrecision<T>::operator=(const VolumeRAMPrecision<T>& that) {
    if (this != &that) {
        VolumeRAM::operator=(that);
        auto dim = that.dimensions_;
        auto data = util::make_unique<T[]>(dim.x * dim.y * dim.z);
        std::memcpy(data.get(), that.data_.get(), dim.x * dim.y * dim.z * sizeof(T));
        data_.swap(data);
        std::swap(dim, dimensions_);
        // Bug fix: without this release(), an unowned previous buffer would
        // be deleted when 'data' goes out of scope.
        if (!ownsDataPtr_) data.release();
        ownsDataPtr_ = true;
    }
    return *this;
}
// Destructor: if the buffer is not owned, detach it from the unique_ptr so
// it is not deleted here; the external owner is responsible for it.
template <typename T>
VolumeRAMPrecision<T>::~VolumeRAMPrecision() {
if (!ownsDataPtr_) data_.release();
}
// Polymorphic deep copy via the copy constructor (caller owns the result).
template <typename T>
VolumeRAMPrecision<T>* VolumeRAMPrecision<T>::clone() const {
return new VolumeRAMPrecision<T>(*this);
}
// Read-only typed access to the voxel buffer.
// Consistency fix: dropped the redundant 'inviwo::' qualifier — we are
// already inside namespace inviwo, matching every other member definition.
template <typename T>
const T* VolumeRAMPrecision<T>::getDataTyped() const {
    return data_.get();
}
// Mutable typed access to the voxel buffer.
// Consistency fix: dropped the redundant 'inviwo::' qualifier — we are
// already inside namespace inviwo, matching every other member definition.
template <typename T>
T* VolumeRAMPrecision<T>::getDataTyped() {
    return data_.get();
}
// Untyped mutable access to the voxel buffer (VolumeRAM interface).
template <typename T>
void* VolumeRAMPrecision<T>::getData() {
return data_.get();
}
// Untyped read-only access to the voxel buffer (VolumeRAM interface).
// Simplification: T* converts implicitly to const void*, so the original
// const_cast<const T*> was unnecessary.
template <typename T>
const void* VolumeRAMPrecision<T>::getData() const {
    return data_.get();
}
// Untyped mutable access starting at element index pos (in voxels, not bytes).
template <typename T>
void* VolumeRAMPrecision<T>::getData(size_t pos) {
return data_.get() + pos;
}
// Untyped read-only access starting at element index pos (in voxels).
// Simplification: pointer arithmetic on the T* then the implicit conversion
// to const void* yields the same address; the const_cast was unnecessary.
template <typename T>
const void* VolumeRAMPrecision<T>::getData(size_t pos) const {
    return data_.get() + pos;
}
// Adopt a new buffer (d is assumed to hold dimensions.x*y*z elements of T)
// and new dimensions. The previous buffer ends up in the local 'data'; if it
// was not owned, release it so it is not deleted here. After this call the
// object owns the new buffer.
template <typename T>
void VolumeRAMPrecision<T>::setData(void* d, size3_t dimensions) {
std::unique_ptr<T[]> data(static_cast<T*>(d));
data_.swap(data);
std::swap(dimensions_, dimensions);
if (!ownsDataPtr_) data.release();
ownsDataPtr_ = true;
}
// Relinquish ownership: the buffer will no longer be deleted by this object
// (see the destructor); the caller takes responsibility for it.
template <typename T>
void VolumeRAMPrecision<T>::removeDataOwnership() {
ownsDataPtr_ = false;
}
// Volume dimensions in voxels (x, y, z).
template <typename T>
const size3_t& VolumeRAMPrecision<T>::getDimensions() const {
return dimensions_;
}
// Total buffer size in bytes: x * y * z voxels of sizeof(T) each.
template <typename T>
size_t VolumeRAMPrecision<T>::getNumberOfBytes() const {
return dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T);
}
// Resize by allocating a fresh (value-initialized) buffer of the new size;
// existing voxel data is NOT preserved. If the previous buffer was not
// owned, release it from the outgoing unique_ptr so it is not deleted.
template <typename T>
void VolumeRAMPrecision<T>::setDimensions(size3_t dimensions) {
auto data = util::make_unique<T[]>(dimensions.x * dimensions.y * dimensions.z);
data_.swap(data);
dimensions_ = dimensions;
if (!ownsDataPtr_) data.release();
ownsDataPtr_ = true;
}
template <typename T>
double VolumeRAMPrecision<T>::getAsDouble(const size3_t& pos) const {
return util::glm_convert<double>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec2 VolumeRAMPrecision<T>::getAsDVec2(const size3_t& pos) const {
return util::glm_convert<dvec2>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec3 VolumeRAMPrecision<T>::getAsDVec3(const size3_t& pos) const {
return util::glm_convert<dvec3>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec4 VolumeRAMPrecision<T>::getAsDVec4(const size3_t& pos) const {
return util::glm_convert<dvec4>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDouble(const size3_t& pos, double val) {
    // Store 'val' at 'pos', converting it to the underlying storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec2(const size3_t& pos, dvec2 val) {
    // Store 'val' at 'pos', converting it to the underlying storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec3(const size3_t& pos, dvec3 val) {
    // Store 'val' at 'pos', converting it to the underlying storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec4(const size3_t& pos, dvec4 val) {
    // Store 'val' at 'pos', converting it to the underlying storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert<T>(val);
}
template <typename T>
double VolumeRAMPrecision<T>::getAsNormalizedDouble(const size3_t& pos) const {
    // Read the voxel at 'pos' and convert it to a normalized double.
    const auto& voxel = data_[posToIndex(pos, dimensions_)];
    return util::glm_convert_normalized<double>(voxel);
}
template <typename T>
dvec2 VolumeRAMPrecision<T>::getAsNormalizedDVec2(const size3_t& pos) const {
    // Read the voxel at 'pos' and convert it to a normalized dvec2.
    const auto& voxel = data_[posToIndex(pos, dimensions_)];
    return util::glm_convert_normalized<dvec2>(voxel);
}
template <typename T>
dvec3 VolumeRAMPrecision<T>::getAsNormalizedDVec3(const size3_t& pos) const {
    // Read the voxel at 'pos' and convert it to a normalized dvec3.
    const auto& voxel = data_[posToIndex(pos, dimensions_)];
    return util::glm_convert_normalized<dvec3>(voxel);
}
template <typename T>
dvec4 VolumeRAMPrecision<T>::getAsNormalizedDVec4(const size3_t& pos) const {
    // Read the voxel at 'pos' and convert it to a normalized dvec4.
    const auto& voxel = data_[posToIndex(pos, dimensions_)];
    return util::glm_convert_normalized<dvec4>(voxel);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDouble(const size3_t& pos, double val) {
    // Store a normalized value at 'pos', converting to the storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec2(const size3_t& pos, dvec2 val) {
    // Store a normalized value at 'pos', converting to the storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec3(const size3_t& pos, dvec3 val) {
    // Store a normalized value at 'pos', converting to the storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec4(const size3_t& pos, dvec4 val) {
    // Store a normalized value at 'pos', converting to the storage type T.
    const size_t index = posToIndex(pos, dimensions_);
    data_[index] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset,
                                                const size3_t& subSize, const size3_t& subOffset) {
    // Copy a sub-block of 'src' of extent 'subSize' (read starting at
    // 'subOffset' within src) into this volume starting at 'dstOffset'.
    // One x-run (row) of subSize.x voxels is memcpy'd per iteration; both
    // volumes are assumed to use the same data format (element size).
    const T* srcData = static_cast<const T*>(src->getData());
    const size_t initialStartPos = (dstOffset.z * (dimensions_.x * dimensions_.y)) +
                                   (dstOffset.y * dimensions_.x) + dstOffset.x;
    const size3_t srcDims = src->getDimensions();
    const size_t dataSize = subSize.x * getDataFormat()->getSize();
    // Collapse the z and y loops into a single index so OpenMP can balance
    // the work across a flat iteration space.
    const ivec3 subSizeI = ivec3(subSize);
#pragma omp parallel for
    for (int zy = 0; zy < subSizeI.z * subSizeI.y; ++zy) {
        const int z = zy / subSizeI.y;
        const int y = zy % subSizeI.y;
        // BUGFIX: these offsets must be loop-local. They were previously
        // declared outside the parallel loop, making them shared and written
        // concurrently by every thread -- a data race that could corrupt the
        // copy destination/source addresses.
        const size_t volumePos = (y * dimensions_.x) + (z * dimensions_.x * dimensions_.y);
        const size_t subVolumePos = ((y + subOffset.y) * srcDims.x) +
                                    ((z + subOffset.z) * srcDims.x * srcDims.y) + subOffset.x;
        std::memcpy(data_.get() + volumePos + initialStartPos, srcData + subVolumePos, dataSize);
    }
}
template <typename T>
const HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins,
                                                               size3_t sampleRate) const {
    // Histograms are cached; return the cache directly when it is valid.
    if (hasHistograms()) return &histCont_;
    bool stopFlag = false;
    calculateHistograms(bins, sampleRate, stopFlag);
    return &histCont_;
}
template <typename T>
HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) {
    // Non-const variant: same lazy-computation scheme as the const overload.
    if (hasHistograms()) return &histCont_;
    bool stopFlag = false;
    calculateHistograms(bins, sampleRate, stopFlag);
    return &histCont_;
}
template <typename T>
void VolumeRAMPrecision<T>::calculateHistograms(size_t bins, size3_t sampleRate,
                                                const bool& stop) const {
    // Without an owning Volume we cannot obtain the data range, so there is
    // nothing to compute.
    const auto volume = getOwner();
    if (!volume) return;
    const dvec2 dataRange = volume->dataMap_.dataRange;
    histCont_ = util::calculateVolumeHistogram(data_.get(), dimensions_, dataRange, stop, bins,
                                               sampleRate);
}
template <typename T>
bool VolumeRAMPrecision<T>::hasHistograms() const {
    // A usable cached histogram requires a non-empty, valid container.
    if (histCont_.empty()) return false;
    return histCont_.isValid();
}
} // namespace
#endif // IVW_VOLUMERAMPRECISION_H
|
libgomp.h | /* Copyright (C) 2005-2017 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file contains data types and function declarations that are not
part of the official OpenACC or OpenMP user interfaces. There are
declarations in here that are part of the GNU Offloading and Multi
Processing ABI, in that the compiler is required to know about them
and use them.
   The convention is that the all caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower case prefix "gomp"
   is used to group items that are completely private to the library.  */
#ifndef LIBGOMP_H
#define LIBGOMP_H 1
#ifndef _LIBGOMP_CHECKING_
/* Define to 1 to perform internal sanity checks. */
#define _LIBGOMP_CHECKING_ 0
#endif
#include "config.h"
#include "gstdint.h"
#include "libgomp-plugin.h"
#ifdef HAVE_PTHREAD_H
#include <pthread.h>
#endif
#include <stdbool.h>
#include <stdlib.h>
#include <stdarg.h>
/* Needed for memset in priority_queue.c. */
#if _LIBGOMP_CHECKING_
# ifdef STRING_WITH_STRINGS
# include <string.h>
# include <strings.h>
# else
# ifdef HAVE_STRING_H
# include <string.h>
# else
# ifdef HAVE_STRINGS_H
# include <strings.h>
# endif
# endif
# endif
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif
/* If we were a C++ library, we'd get this from <std/atomic>. */
enum memmodel
{
MEMMODEL_RELAXED = 0,
MEMMODEL_CONSUME = 1,
MEMMODEL_ACQUIRE = 2,
MEMMODEL_RELEASE = 3,
MEMMODEL_ACQ_REL = 4,
MEMMODEL_SEQ_CST = 5
};
/* alloc.c */
extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
/* Avoid conflicting prototypes of alloca() in system headers by using
GCC's builtin alloca(). */
#define gomp_alloca(x) __builtin_alloca(x)
/* error.c */
extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
__attribute__ ((format (printf, 2, 3)));
#define gomp_vdebug(KIND, FMT, VALIST) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_vdebug) ((KIND), (FMT), (VALIST)); \
} while (0)
#define gomp_debug(KIND, ...) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_debug) ((KIND), __VA_ARGS__); \
} while (0)
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
__attribute__ ((noreturn, format (printf, 1, 2)));
struct gomp_task;
struct gomp_taskgroup;
struct htab;
#include "priority_queue.h"
#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "simple-bar.h"
#include "ptrlock.h"
/* This structure contains the data to control one work-sharing construct,
either a LOOP (FOR/DO) or a SECTIONS. */
enum gomp_schedule_type
{
GFS_RUNTIME,
GFS_STATIC,
GFS_DYNAMIC,
GFS_GUIDED,
GFS_AUTO
};
struct gomp_doacross_work_share
{
union {
/* chunk_size copy, as ws->chunk_size is multiplied by incr for
GFS_DYNAMIC. */
long chunk_size;
/* Likewise, but for ull implementation. */
unsigned long long chunk_size_ull;
/* For schedule(static,0) this is the number
of iterations assigned to the last thread, i.e. number of
iterations / number of threads. */
long q;
/* Likewise, but for ull implementation. */
unsigned long long q_ull;
};
/* Size of each array entry (padded to cache line size). */
unsigned long elt_sz;
/* Number of dimensions in sink vectors. */
unsigned int ncounts;
/* True if the iterations can be flattened. */
bool flattened;
/* Actual array (of elt_sz sized units), aligned to cache line size.
This is indexed by team_id for GFS_STATIC and outermost iteration
/ chunk_size for other schedules. */
unsigned char *array;
/* These two are only used for schedule(static,0). */
/* This one is number of iterations % number of threads. */
long t;
union {
/* And this one is cached t * (q + 1). */
long boundary;
/* Likewise, but for the ull implementation. */
unsigned long long boundary_ull;
};
/* Array of shift counts for each dimension if they can be flattened. */
unsigned int shift_counts[];
};
struct gomp_work_share
{
/* This member records the SCHEDULE clause to be used for this construct.
The user specification of "runtime" will already have been resolved.
If this is a SECTIONS construct, this value will always be DYNAMIC. */
enum gomp_schedule_type sched;
int mode;
union {
struct {
/* This is the chunk_size argument to the SCHEDULE clause. */
long chunk_size;
/* This is the iteration end point. If this is a SECTIONS construct,
this is the number of contained sections. */
long end;
/* This is the iteration step. If this is a SECTIONS construct, this
is always 1. */
long incr;
};
struct {
/* The same as above, but for the unsigned long long loop variants. */
unsigned long long chunk_size_ull;
unsigned long long end_ull;
unsigned long long incr_ull;
};
};
union {
/* This is a circular queue that details which threads will be allowed
into the ordered region and in which order. When a thread allocates
iterations on which it is going to work, it also registers itself at
the end of the array. When a thread reaches the ordered region, it
checks to see if it is the one at the head of the queue. If not, it
blocks on its RELEASE semaphore. */
unsigned *ordered_team_ids;
/* This is a pointer to DOACROSS work share data. */
struct gomp_doacross_work_share *doacross;
};
/* This is the number of threads that have registered themselves in
the circular queue ordered_team_ids. */
unsigned ordered_num_used;
/* This is the team_id of the currently acknowledged owner of the ordered
section, or -1u if the ordered section has not been acknowledged by
any thread. This is distinguished from the thread that is *allowed*
to take the section next. */
unsigned ordered_owner;
  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
unsigned ordered_cur;
/* This is a chain of allocated gomp_work_share blocks, valid only
in the first gomp_work_share struct in the block. */
struct gomp_work_share *next_alloc;
/* The above fields are written once during workshare initialization,
or related to ordered worksharing. Make sure the following fields
are in a different cache line. */
/* This lock protects the update of the following members. */
gomp_mutex_t lock __attribute__((aligned (64)));
/* This is the count of the number of threads that have exited the work
share construct. If the construct was marked nowait, they have moved on
to other work; otherwise they're blocked on a barrier. The last member
of the team to exit the work share construct must deallocate it. */
unsigned threads_completed;
union {
/* This is the next iteration value to be allocated. In the case of
GFS_STATIC loops, this the iteration start point and never changes. */
long next;
/* The same, but with unsigned long long type. */
unsigned long long next_ull;
/* This is the returned data structure for SINGLE COPYPRIVATE. */
void *copyprivate;
};
union {
/* Link to gomp_work_share struct for next work sharing construct
encountered after this one. */
gomp_ptrlock_t next_ws;
/* gomp_work_share structs are chained in the free work share cache
through this. */
struct gomp_work_share *next_free;
};
/* If only few threads are in the team, ordered_team_ids can point
to this array which fills the padding at the end of this struct. */
unsigned inline_ordered_team_ids[0];
};
/* This structure contains all of the thread-local data associated with
a thread team. This is the data that must be saved when a thread
encounters a nested PARALLEL construct. */
struct gomp_team_state
{
/* This is the team of which the thread is currently a member. */
struct gomp_team *team;
/* This is the work share construct which this thread is currently
processing. Recall that with NOWAIT, not all threads may be
processing the same construct. */
struct gomp_work_share *work_share;
/* This is the previous work share construct or NULL if there wasn't any.
When all threads are done with the current work sharing construct,
the previous one can be freed. The current one can't, as its
next_ws field is used. */
struct gomp_work_share *last_work_share;
/* This is the ID of this thread within the team. This value is
guaranteed to be between 0 and N-1, where N is the number of
threads in the team. */
unsigned team_id;
/* Nesting level. */
unsigned level;
/* Active nesting level. Only active parallel regions are counted. */
unsigned active_level;
/* Place-partition-var, offset and length into gomp_places_list array. */
unsigned place_partition_off;
unsigned place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
/* Number of single stmts encountered. */
unsigned long single_count;
#endif
/* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
trip number through the loop. So first time a particular loop
is encountered this number is 0, the second time through the loop
is 1, etc. This is unused when the compiler knows in advance that
the loop is statically scheduled. */
unsigned long static_trip;
};
struct target_mem_desc;
/* These are the OpenMP 4.0 Internal Control Variables described in
section 2.3.1. Those described as having one copy per task are
stored within the structure; those described as having one copy
for the whole program are (naturally) global variables. */
struct gomp_task_icv
{
unsigned long nthreads_var;
enum gomp_schedule_type run_sched_var;
int run_sched_chunk_size;
int default_device_var;
unsigned int thread_limit_var;
bool dyn_var;
bool nest_var;
char bind_var;
/* Internal ICV. */
struct target_mem_desc *target_data;
};
extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
extern int gomp_debug_var;
extern int goacc_device_num;
extern char *goacc_device_type;
enum gomp_task_kind
{
/* Implicit task. */
GOMP_TASK_IMPLICIT,
/* Undeferred task. */
GOMP_TASK_UNDEFERRED,
/* Task created by GOMP_task and waiting to be run. */
GOMP_TASK_WAITING,
/* Task currently executing or scheduled and about to execute. */
GOMP_TASK_TIED,
/* Used for target tasks that have vars mapped and async run started,
but not yet completed. Once that completes, they will be readded
into the queues as GOMP_TASK_WAITING in order to perform the var
unmapping. */
GOMP_TASK_ASYNC_RUNNING
};
struct gomp_task_depend_entry
{
/* Address of dependency. */
void *addr;
struct gomp_task_depend_entry *next;
struct gomp_task_depend_entry *prev;
/* Task that provides the dependency in ADDR. */
struct gomp_task *task;
/* Depend entry is of type "IN". */
bool is_in;
bool redundant;
bool redundant_out;
};
struct gomp_dependers_vec
{
size_t n_elem;
size_t allocated;
struct gomp_task *elem[];
};
/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */
struct gomp_taskwait
{
bool in_taskwait;
bool in_depend_wait;
/* Number of tasks we are waiting for. */
size_t n_depend;
gomp_sem_t taskwait_sem;
};
/* This structure describes a "task" to be run by a thread. */
struct gomp_task
{
/* Parent of this task. */
struct gomp_task *parent;
/* Children of this task. */
struct priority_queue children_queue;
/* Taskgroup this task belongs in. */
struct gomp_taskgroup *taskgroup;
/* Tasks that depend on this task. */
struct gomp_dependers_vec *dependers;
struct htab *depend_hash;
struct gomp_taskwait *taskwait;
/* Number of items in DEPEND. */
size_t depend_count;
/* Number of tasks this task depends on. Once this counter reaches
0, we have no unsatisfied dependencies, and this task can be put
into the various queues to be scheduled. */
size_t num_dependees;
/* Priority of this task. */
int priority;
/* The priority node for this task in each of the different queues.
We put this here to avoid allocating space for each priority
node. Then we play offsetof() games to convert between pnode[]
entries and the gomp_task in which they reside. */
struct priority_node pnode[3];
struct gomp_task_icv icv;
void (*fn) (void *);
void *fn_data;
enum gomp_task_kind kind;
bool in_tied_task;
bool final_task;
bool copy_ctors_done;
/* Set for undeferred tasks with unsatisfied dependencies which
block further execution of their parent until the dependencies
are satisfied. */
bool parent_depends_on;
/* Dependencies provided and/or needed for this task. DEPEND_COUNT
is the number of items available. */
struct gomp_task_depend_entry depend[];
};
/* This structure describes a single #pragma omp taskgroup. */
struct gomp_taskgroup
{
struct gomp_taskgroup *prev;
/* Queue of tasks that belong in this taskgroup. */
struct priority_queue taskgroup_queue;
bool in_taskgroup_wait;
bool cancelled;
gomp_sem_t taskgroup_sem;
size_t num_children;
};
/* Various state of OpenMP async offloading tasks. */
enum gomp_target_task_state
{
GOMP_TARGET_TASK_DATA,
GOMP_TARGET_TASK_BEFORE_MAP,
GOMP_TARGET_TASK_FALLBACK,
GOMP_TARGET_TASK_READY_TO_RUN,
GOMP_TARGET_TASK_RUNNING,
GOMP_TARGET_TASK_FINISHED
};
/* This structure describes a target task. */
struct gomp_target_task
{
struct gomp_device_descr *devicep;
void (*fn) (void *);
size_t mapnum;
size_t *sizes;
unsigned short *kinds;
unsigned int flags;
enum gomp_target_task_state state;
struct target_mem_desc *tgt;
struct gomp_task *task;
struct gomp_team *team;
/* Device-specific target arguments. */
void **args;
void *hostaddrs[];
};
/* This structure describes a "team" of threads. These are the threads
that are spawned by a PARALLEL constructs, as well as the work sharing
constructs that the team encounters. */
struct gomp_team
{
/* This is the number of threads in the current team. */
unsigned nthreads;
/* This is number of gomp_work_share structs that have been allocated
as a block last time. */
unsigned work_share_chunk;
/* This is the saved team state that applied to a master thread before
the current thread was created. */
struct gomp_team_state prev_ts;
/* This semaphore should be used by the master thread instead of its
"native" semaphore in the thread structure. Required for nested
parallels, as the master is a member of two teams. */
gomp_sem_t master_release;
/* This points to an array with pointers to the release semaphore
of the threads in the team. */
gomp_sem_t **ordered_release;
/* List of work shares on which gomp_fini_work_share hasn't been
called yet. If the team hasn't been cancelled, this should be
equal to each thr->ts.work_share, but otherwise it can be a possibly
long list of workshares. */
struct gomp_work_share *work_shares_to_free;
/* List of gomp_work_share structs chained through next_free fields.
This is populated and taken off only by the first thread in the
team encountering a new work sharing construct, in a critical
section. */
struct gomp_work_share *work_share_list_alloc;
/* List of gomp_work_share structs freed by free_work_share. New
entries are atomically added to the start of the list, and
alloc_work_share can safely only move all but the first entry
to work_share_list alloc, as free_work_share can happen concurrently
with alloc_work_share. */
struct gomp_work_share *work_share_list_free;
#ifdef HAVE_SYNC_BUILTINS
/* Number of simple single regions encountered by threads in this
team. */
unsigned long single_count;
#else
/* Mutex protecting addition of workshares to work_share_list_free. */
gomp_mutex_t work_share_list_free_lock;
#endif
/* This barrier is used for most synchronization of the team. */
gomp_barrier_t barrier;
/* Initial work shares, to avoid allocating any gomp_work_share
structs in the common case. */
struct gomp_work_share work_shares[8];
gomp_mutex_t task_lock;
/* Scheduled tasks. */
struct priority_queue task_queue;
/* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */
unsigned int task_count;
/* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */
unsigned int task_queued_count;
/* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
directly in gomp_barrier_handle_tasks; tasks spawned
from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
that is called from a task run from gomp_barrier_handle_tasks.
task_running_count should be always <= team->nthreads,
and if current task isn't in_tied_task, then it will be
even < team->nthreads. */
unsigned int task_running_count;
int work_share_cancelled;
int team_cancelled;
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
/* This structure contains all data that is private to libgomp and is
allocated per thread. */
struct gomp_thread
{
/* This is the function that the thread should run upon launch. */
void (*fn) (void *data);
void *data;
/* This is the current team state for this thread. The ts.team member
is NULL only if the thread is idle. */
struct gomp_team_state ts;
/* This is the task that the thread is currently executing. */
struct gomp_task *task;
/* This semaphore is used for ordered loops. */
gomp_sem_t release;
/* Place this thread is bound to plus one, or zero if not bound
to any place. */
unsigned int place;
/* User pthread thread pool */
struct gomp_thread_pool *thread_pool;
};
struct gomp_thread_pool
{
/* This array manages threads spawned from the top level, which will
return to the idle loop once the current PARALLEL construct ends. */
struct gomp_thread **threads;
unsigned threads_size;
unsigned threads_used;
/* The last team is used for non-nested teams to delay their destruction to
make sure all the threads in the team move on to the pool's barrier before
the team's barrier is destroyed. */
struct gomp_team *last_team;
/* Number of threads running in this contention group. */
unsigned long threads_busy;
/* This barrier holds and releases threads waiting in thread pools. */
gomp_simple_barrier_t threads_dock;
};
enum gomp_cancel_kind
{
GOMP_CANCEL_PARALLEL = 1,
GOMP_CANCEL_LOOP = 2,
GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
GOMP_CANCEL_SECTIONS = 4,
GOMP_CANCEL_TASKGROUP = 8
};
/* ... and here is that TLS data. */
#if defined __nvptx__
/* On NVPTX there is no OS-level TLS; per-thread gomp_thread structs live in
   a block-shared array indexed by the hardware thread id.  */
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  /* Read the hardware thread index (%tid.y) -- presumably the y dimension
     indexes OpenMP threads within a team here; confirm against the nvptx
     team-launch code.  */
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
/* Native or emulated TLS: one gomp_thread object per thread, accessed
   directly through __thread.  */
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
/* Portable fallback: the per-thread pointer is stashed in a pthread key.  */
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
extern struct gomp_task_icv *gomp_new_icv (void);
/* Here's how to access the current copy of the ICVs. */
static inline struct gomp_task_icv *gomp_icv (bool write)
{
  /* Per-task ICVs take precedence when a task exists.  Otherwise a writer
     gets a freshly allocated copy, while a reader may share the globals.  */
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  return write ? gomp_new_icv () : &gomp_global_icv;
}
#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation. */
extern pthread_attr_t gomp_thread_attr;
extern pthread_key_t gomp_thread_destructor;
#endif
/* Function prototypes. */
/* affinity.c */
extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
/* iter.c */
extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);
#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif
/* iter_ull.c */
extern int gomp_iter_ull_static_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
unsigned long long *);
#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
unsigned long long *);
#endif
/* ordered.c */
extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
unsigned long long);
/* parallel.c */
extern unsigned gomp_resolve_num_threads (unsigned, unsigned);
/* proc.c (in config/) */
extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);
/* task.c */
extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
void (*) (void *), size_t, void **,
size_t *, unsigned short *, unsigned int,
void **, void **,
enum gomp_target_task_state);
static inline void
gomp_finish_task (struct gomp_task *task)
{
  /* Most tasks carry no depend clauses, so the dependency hash table is
     rarely allocated; free it only when it exists.  */
  if (__builtin_expect (task->depend_hash != NULL, 0))
    free (task->depend_hash);
}
/* team.c */
extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
unsigned, struct gomp_team *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
/* target.c */
extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);
/* Splay tree definitions. */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;
struct target_var_desc {
/* Splay key. */
splay_tree_key key;
/* True if data should be copied from device to host at the end. */
bool copy_from;
/* True if data always should be copied from device to host at the end. */
bool always_copy_from;
/* Relative offset against key host_start. */
uintptr_t offset;
/* Actual length. */
uintptr_t length;
};
struct target_mem_desc {
/* Reference count. */
uintptr_t refcount;
/* All the splay nodes allocated together. */
splay_tree_node array;
/* Start of the target region. */
uintptr_t tgt_start;
  /* End of the target region.  */
uintptr_t tgt_end;
/* Handle to free. */
void *to_free;
/* Previous target_mem_desc. */
struct target_mem_desc *prev;
/* Number of items in following list. */
size_t list_count;
/* Corresponding target device descriptor. */
struct gomp_device_descr *device_descr;
/* List of target items to remove (or decrease refcount)
at the end of region. */
struct target_var_desc list[];
};
/* Special value for refcount - infinity. */
#define REFCOUNT_INFINITY (~(uintptr_t) 0)
/* Special value for refcount - tgt_offset contains target address of the
artificial pointer to "omp declare target link" object. */
#define REFCOUNT_LINK (~(uintptr_t) 1)
struct splay_tree_key_s {
/* Address of the host object. */
uintptr_t host_start;
/* Address immediately after the host object. */
uintptr_t host_end;
/* Descriptor of the target memory. */
struct target_mem_desc *tgt;
/* Offset from tgt->tgt_start to the start of the target object. */
uintptr_t tgt_offset;
/* Reference count. */
uintptr_t refcount;
/* Pointer to the original mapping of "omp declare target link" object. */
splay_tree_key link_key;
};
/* The comparison function. */
static inline int
splay_compare (splay_tree_key x, splay_tree_key y)
{
  /* Two zero-length keys always compare equal.  */
  if (x->host_start == x->host_end && y->host_start == y->host_end)
    return 0;
  /* Disjoint ranges order by position; overlapping ranges compare equal so
     lookups find any mapping that intersects the query.  */
  if (x->host_end <= y->host_start)
    return -1;
  return x->host_start >= y->host_end ? 1 : 0;
}
#include "splay-tree.h"
typedef struct acc_dispatch_t
{
/* This is a linked list of data mapped using the
acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas.
Unlike mapped_data in the goacc_thread struct, unmapping can
happen out-of-order with respect to mapping. */
/* This is guarded by the lock in the "outer" struct gomp_device_descr. */
struct target_mem_desc *data_environ;
/* Execute. */
__typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;
/* Async cleanup callback registration. */
__typeof (GOMP_OFFLOAD_openacc_register_async_cleanup)
*register_async_cleanup_func;
/* Asynchronous routines. */
__typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func;
__typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all_async)
*async_wait_all_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func;
/* Create/destroy TLS data. */
__typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
__typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
*destroy_thread_data_func;
/* NVIDIA target specific routines. */
struct {
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
*get_current_device_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
*get_current_context_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
} cuda;
} acc_dispatch_t;
/* Various state of the accelerator device. */
enum gomp_device_state
{
GOMP_DEVICE_UNINITIALIZED,
GOMP_DEVICE_INITIALIZED,
GOMP_DEVICE_FINALIZED
};
/* This structure describes an accelerator device.
   It contains the name of the corresponding libgomp plugin, function handlers
   for interaction with the device, the ID-number of the device, and
   information about mapped memory.  */
struct gomp_device_descr
{
  /* Immutable data, which is only set during initialization, and which is not
     guarded by the lock.  */

  /* The name of the device.  */
  const char *name;

  /* Capabilities of device (supports OpenACC, OpenMP).  */
  unsigned int capabilities;

  /* This is the ID number of device among devices of the same type.  */
  int target_id;

  /* This is the TYPE of device.  */
  enum offload_target_type type;

  /* Function handlers, resolved from the plugin's GOMP_OFFLOAD_* entry
     points.  Each pointer has exactly the type of the corresponding plugin
     function.  */
  __typeof (GOMP_OFFLOAD_get_name) *get_name_func;
  __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
  __typeof (GOMP_OFFLOAD_get_type) *get_type_func;
  __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
  __typeof (GOMP_OFFLOAD_init_device) *init_device_func;
  __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
  __typeof (GOMP_OFFLOAD_version) *version_func;
  __typeof (GOMP_OFFLOAD_load_image) *load_image_func;
  __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
  __typeof (GOMP_OFFLOAD_alloc) *alloc_func;
  __typeof (GOMP_OFFLOAD_free) *free_func;
  __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
  __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
  __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
  __typeof (GOMP_OFFLOAD_can_run) *can_run_func;
  __typeof (GOMP_OFFLOAD_run) *run_func;
  __typeof (GOMP_OFFLOAD_async_run) *async_run_func;

  /* Splay tree containing information about mapped memory regions.  */
  struct splay_tree_s mem_map;

  /* Mutex for the mutable data (mem_map, state, and the mutable parts of
     OPENACC below).  */
  gomp_mutex_t lock;

  /* Current state of the device.  OpenACC allows to move from INITIALIZED state
     back to UNINITIALIZED state.  OpenMP allows only to move from INITIALIZED
     to FINALIZED state (at program shutdown).  */
  enum gomp_device_state state;

  /* OpenACC-specific data and functions.  */
  /* This is mutable because of its mutable data_environ and target_data
     members.  */
  acc_dispatch_t openacc;
};
/* Kind of the pragma, for which gomp_map_vars () is called.  */
enum gomp_map_vars_kind
{
  GOMP_MAP_VARS_OPENACC,
  GOMP_MAP_VARS_TARGET,
  GOMP_MAP_VARS_DATA,
  GOMP_MAP_VARS_ENTER_DATA
};

/* OpenACC pointer bookkeeping helpers.  */
extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, bool, int, int);

/* Map host address ranges onto a device (and back).  The gomp_map_vars_kind
   argument selects the pragma-specific mapping behavior.  */
extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
					      size_t, void **, void **,
					      size_t *, void *, bool,
					      enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);

/* Device initialization / teardown.  */
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
extern void gomp_unload_device (struct gomp_device_descr *);

/* work.c */
extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
static inline void
gomp_work_share_init_done (void)
{
struct gomp_thread *thr = gomp_thread ();
if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif
/* Now that we're back to default visibility, include the globals. */
#include "libgomp_g.h"
/* Include omp.h by parts. */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
|| !defined (HAVE_ATTRIBUTE_ALIAS) \
|| !defined (HAVE_AS_SYMVER_DIRECTIVE) \
|| !defined (PIC) \
|| !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif
#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
# define strong_alias(fn, al) \
extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
__asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
__asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
/* Mark internal symbols as hidden when the toolchain supports visibility
   attributes, so they do not escape the shared library.  */
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

/* When alias attributes are available, ialias(fn) creates a hidden alias
   gomp_ialias_fn for FN; internal callers reach FN through it (via
   ialias_redirect / ialias_call) without indirection through the PLT.
   Otherwise the macros degrade to plain direct calls.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
/* ialias_ulp expands to the stringified user label prefix (e.g. "_" on
   some targets), needed to build the asm-level symbol name.  */
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
/* Helper function for priority_node_to_task() and
   task_to_priority_node().

   Return the offset from a task to its priority_node entry.  The
   priority_node entry has a type of TYPE.  */
static inline size_t
priority_queue_offset (enum priority_queue_type type)
{
  /* A gomp_task embeds one priority_node per queue type in its pnode[]
     array; return the byte offset of the requested one.  */
  return offsetof (struct gomp_task, pnode[(int) type]);
}
/* Return the task associated with a priority NODE of type TYPE.
   Inverse of task_to_priority_node(): steps back from the embedded
   pnode[type] member to the enclosing gomp_task.  */
static inline struct gomp_task *
priority_node_to_task (enum priority_queue_type type,
		       struct priority_node *node)
{
  return (struct gomp_task *) ((char *) node - priority_queue_offset (type));
}
/* Return the priority node of type TYPE for a given TASK, i.e. the address
   of the pnode[type] member embedded in the task.  */
static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
		       struct gomp_task *task)
{
  return (struct priority_node *) ((char *) task
				   + priority_queue_offset (type));
}
#endif /* LIBGOMP_H */
|
mediancut.c | /*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdlib.h>
#include <stddef.h>
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
/* Index (0..3) of channel CH within f_pixel, measured in floats.  */
#define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float))

static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]);

/* A box is a contiguous slice [ind, ind+colors) of the histogram array,
   together with cached statistics of the colors it covers.  */
struct box {
    f_pixel color;    // weighted average color (set via averagepixels in box_init)
    f_pixel variance; // weighted per-channel variance (see box_variance)
    double sum, total_error, max_error; // sum = total adjusted_weight; total_error < 0 means "not yet computed" (see total_box_error_below_target); max_error = worst colordifference from the average
    unsigned int ind;    // index of the first histogram entry in this box
    unsigned int colors; // number of histogram entries in this box
};
ALWAYS_INLINE static double variance_diff(double val, const double good_enough);
/* Squared deviation, with deviations smaller than GOOD_ENOUGH counted at a
   quarter of their squared value so tiny differences barely influence the
   variance.  */
inline static double variance_diff(double val, const double good_enough)
{
    const double sq = val * val;
    return (sq < good_enough * good_enough) ? sq * 0.25 : sq;
}
/** Weighted per-channel variance of the box. It's used to decide which channel to split by */
static f_pixel box_variance(const hist_item achv[], const struct box *box)
{
    const f_pixel center = box->color;
    double va = 0, vr = 0, vg = 0, vb = 0;

    for (unsigned int j = 0; j < box->colors; j++) {
        const f_pixel px = achv[box->ind + j].acolor;
        const double w = achv[box->ind + j].adjusted_weight;
        // alpha tolerates twice the deviation of the color channels
        va += w * variance_diff(center.a - px.a, 2.0/256.0);
        vr += w * variance_diff(center.r - px.r, 1.0/256.0);
        vg += w * variance_diff(center.g - px.g, 1.0/256.0);
        vb += w * variance_diff(center.b - px.b, 1.0/256.0);
    }

    // per-channel importance weights
    return (f_pixel){
        .a = va*(4.0/16.0),
        .r = vr*(7.0/16.0),
        .g = vg*(9.0/16.0),
        .b = vb*(5.0/16.0),
    };
}
/* Largest colordifference between the box's average color and any single
   entry inside it.  */
static double box_max_error(const hist_item achv[], const struct box *box)
{
    const f_pixel mean = box->color;
    double worst = 0;

    for (unsigned int j = 0; j < box->colors; j++) {
        const double err = colordifference(mean, achv[box->ind + j].acolor);
        worst = (err > worst) ? err : worst;
    }
    return worst;
}
ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h);
/* Exchange two histogram entries; a self-swap is a no-op.  */
static inline void hist_item_swap(hist_item *l, hist_item *r)
{
    if (l == r) {
        return;
    }
    const hist_item tmp = *l;
    *l = *r;
    *r = tmp;
}
ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len);
/* Pick a pivot index for partitioning by tmp.sort_value.  Short ranges just
   use the middle element; longer ranges use a median-of-three of the
   elements at indices 8, len/2 and len-1.  */
inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len)
{
    if (len < 32) {
        return len/2;
    }

    const unsigned int aidx=8, bidx=len/2, cidx=len-1;
    const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value;
    // return the index whose value is the median of a, b, c
    return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx ))
                   : ((b > c) ? bidx : ((a < c) ? aidx : cidx ));
}
ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len);
/* Partition base[0..len) around a pivot chosen by qsort_pivot (moved to
   base[0] first).  Elements with sort_value >= pivot end up on the left,
   smaller ones on the right (i.e. descending order); returns the final
   index of the pivot.  */
inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len)
{
    unsigned int l = 1, r = len;
    if (len >= 8) {
        // only bother with median-of-three pivoting on larger ranges
        hist_item_swap(&base[0], &base[qsort_pivot(base,len)]);
    }

    const unsigned int pivot_value = base[0].tmp.sort_value;
    while (l < r) {
        if (base[l].tmp.sort_value >= pivot_value) {
            l++; // already on the correct (left) side
        } else {
            // scan from the right for an element that belongs left, then swap
            while(l < --r && base[r].tmp.sort_value <= pivot_value) {}
            hist_item_swap(&base[l], &base[r]);
        }
    }
    // move the pivot from base[0] into its final slot
    l--;
    hist_item_swap(&base[0], &base[l]);

    return l;
}
/** quick select algorithm */
/* Partially sort base[0..len) just enough that the element at index
   sort_start is in its final (descending) position: repeatedly partition
   and descend only into the side that contains sort_start.  */
static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start)
{
    for(;;) {
        const unsigned int l = qsort_partition(base, len), r = l+1;

        if (l > 0 && sort_start < l) {
            len = l; // target lies strictly left of the pivot
        }
        else if (r < len && sort_start > r) {
            base += r; len -= r; sort_start -= r; // target lies right of the pivot
        }
        else break; // pivot (or its immediate neighbor) is the target
    }
}
/** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set */
/* *lowervar carries the running sum of color_weight across calls; the
   function partially sorts (descending by sort_value) only as much of the
   array as is needed to find the first element at which the running sum
   exceeds halfvar.  Returns a pointer to that element, or NULL if the sum
   never exceeds halfvar within this range.  */
static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar)
{
    do {
        const unsigned int l = qsort_partition(base, len), r = l+1;
        // check if sum of left side is smaller than half,
        // if it is, then it doesn't need to be sorted
        unsigned int t = 0; double tmpsum = *lowervar;
        while (t <= l && tmpsum < halfvar) tmpsum += base[t++].color_weight;

        if (tmpsum < halfvar) {
            // entire left side (including pivot) stays below halfvar — accept
            // it unsorted and move on to the right side
            *lowervar = tmpsum;
        } else {
            if (l > 0) {
                // the crossing point is somewhere in the left side — recurse
                hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar);
                if (res) return res;
            } else {
                // End of left recursion. This will be executed in order from the first element.
                *lowervar += base[0].color_weight;
                if (*lowervar > halfvar) return &base[0];
            }
        }

        if (len > r) {
            base += r; len -= r; // tail-recursive "call"
        } else {
            *lowervar += base[r].color_weight;
            return (*lowervar > halfvar) ? &base[r] : NULL;
        }
    } while(1);
}
static f_pixel get_median(const struct box *b, hist_item achv[]);
/* Channel index paired with its variance, used to rank channels.  */
typedef struct {
    unsigned int chan; float variance;
} channelvariance;

/* qsort comparator: orders channelvariance entries by DESCENDING variance.  */
static int comparevariance(const void *ch1, const void *ch2)
{
    const float v1 = ((const channelvariance*)ch1)->variance;
    const float v2 = ((const channelvariance*)ch2)->variance;

    if (v1 > v2) return -1;
    if (v1 < v2) return 1;
    return 0;
}
/** Finds which channels need to be sorted first and preproceses achv for fast sort */
/* Ranks the box's channels by variance, packs a 32-bit sort key per entry
   (dominant channel in the high 16 bits), computes the per-entry
   color_weight relative to the box median, and returns half of the total
   color_weight (the split threshold used by hist_item_sort_halfvar).  */
static double prepare_sort(struct box *b, hist_item achv[])
{
    /*
    ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance
    */
    channelvariance channels[4] = {
        {index_of_channel(a), b->variance.a},
        {index_of_channel(r), b->variance.r},
        {index_of_channel(g), b->variance.g},
        {index_of_channel(b), b->variance.b},
    };

    qsort(channels, 4, sizeof(channels[0]), comparevariance);

    const unsigned int ind1 = b->ind;
    const unsigned int colors = b->colors;
// GCC 9+ requires loop bounds to be listed explicitly under default(none)
#if __GNUC__ >= 9
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels, colors, ind1)
#else
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels)
#endif
    for(unsigned int i=0; i < colors; i++) {
        const float *chans = (const float *)&achv[ind1 + i].acolor;
        // Only the first channel really matters. When trying median cut many times
        // with different histogram weights, I don't want sort randomness to influence outcome.
        achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) |
                                         (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0);
    }

    const f_pixel median = get_median(b, achv);

    // box will be split to make color_weight of each side even
    const unsigned int ind = b->ind, end = ind+b->colors;
    double totalvar = 0;
    #pragma omp parallel for if (end - ind > 15000) \
        schedule(static) default(shared) reduction(+:totalvar)
    for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j]));
    return totalvar / 2.0;
}
/** finds median in unsorted set by sorting only minimum required */
static f_pixel get_median(const struct box *b, hist_item achv[])
{
    const unsigned int mid = (b->colors-1)/2;
    hist_item *const items = &achv[b->ind];

    hist_item_sort_range(items, b->colors, mid);

    if (b->colors & 1) {
        return items[mid].acolor; // odd count: exact middle element
    }

    // technically the second color is not guaranteed to be sorted correctly
    // but most of the time it is good enough to be useful
    return averagepixels(2, &items[mid]);
}
/*
** Find the best splittable box. -1 if no boxes are splittable.
*/
static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse)
{
int bi=-1; double maxsum=0;
for(unsigned int i=0; i < boxes; i++) {
if (bv[i].colors < 2) {
continue;
}
// looks only at max variance, because it's only going to split by it
const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b));
double thissum = bv[i].sum * MAX(bv[i].variance.a, cv);
if (bv[i].max_error > max_mse) {
thissum = thissum* bv[i].max_error/max_mse;
}
if (thissum > maxsum) {
maxsum = thissum;
bi = i;
}
}
return bi;
}
/* Weight of a histogram entry for splitting: distance from the median,
   scaled by a dampened (square-root) popularity.  */
inline static double color_weight(f_pixel median, hist_item h)
{
    const float diff = colordifference(median, h.acolor);
    const double popularity = __builtin_sqrtf(1.0+h.adjusted_weight)-1.0;
    return __builtin_sqrtf(diff) * popularity;
}
static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv);
static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes);
/* Total perceptually-weighted squared error of the box against its
   average color.  */
static double box_error(const struct box *box, const hist_item achv[])
{
    const f_pixel avg = box->color;
    double err = 0;

    for (unsigned int j = 0; j < box->colors; j++) {
        err += colordifference(avg, achv[box->ind + j].acolor) * achv[box->ind + j].perceptual_weight;
    }
    return err;
}
/* True if the summed per-box error is at or below target_mse.  Box errors
   are cached in box.total_error (negative = not yet computed); this does a
   cheap pass over cached values first so it can often answer "no" without
   computing anything.  */
static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist)
{
    // scale the normalized target up to the histogram's absolute weight
    target_mse *= hist->total_perceptual_weight;
    double total_error=0;

    // first pass: sum only the already-cached errors
    for(unsigned int i=0; i < boxes; i++) {
        // error is (re)calculated lazily
        if (bv[i].total_error >= 0) {
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }

    // second pass: fill in missing errors, bailing out as soon as the
    // running total crosses the target
    for(unsigned int i=0; i < boxes; i++) {
        if (bv[i].total_error < 0) {
            bv[i].total_error = box_error(&bv[i], hist->achv);
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }
    return true;
}
/* Initialize a box over histogram slice [ind, ind+colors) with the given
   precomputed weight sum.  total_error starts at -1 ("not yet computed",
   see total_box_error_below_target).  Order matters: color must be set
   before box_variance/box_max_error, which read box->color.  */
static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum) {
    box->ind = ind;
    box->colors = colors;
    box->sum = sum;
    box->total_error = -1;

    box->color = averagepixels(colors, &achv[ind]);
    box->variance = box_variance(achv, box);
    box->max_error = box_max_error(achv, box);
}
/*
** Here is the fun part, the median-cut colormap generator.  This is based
** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer
** Display," SIGGRAPH 1982 Proceedings, page 297.
**
** Splits the histogram into up to NEWCOLORS boxes (stopping early if the
** error drops below TARGET_MSE) and builds a colormap from the box
** averages.
*/
LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*))
{
    hist_item *achv = hist->achv;
    LIQ_ARRAY(struct box, bv, newcolors);
    unsigned int boxes = 1;

    /*
    ** Set up the initial box covering the whole histogram.
    */
    {
        double sum = 0;
        for(unsigned int i=0; i < hist->size; i++) {
            sum += achv[i].adjusted_weight;
        }
        box_init(&bv[0], achv, 0, hist->size, sum);

        /*
        ** Main loop: split boxes until we have enough.
        */
        while (boxes < newcolors) {
            // first splits boxes that exceed quality limit (to have colors for things like odd green pixel),
            // later raises the limit to allow large smooth areas/gradients get colors.
            const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse;
            const int bi = best_splittable_box(bv, boxes, current_max_mse);
            if (bi < 0) {
                break; /* ran out of colors! */
            }

            unsigned int indx = bv[bi].ind;
            unsigned int clrs = bv[bi].colors;

            /*
             Classic implementation tries to get even number of colors or pixels in each subdivision.
             Here, instead of popularity I use (sqrt(popularity)*variance) metric.
             Each subdivision balances number of pixels (popular colors) and low variance -
             boxes can be large if they have similar colors. Later boxes with high variance
             will be more likely to be split.
             Median used as expected value gives much better results than mean.
            */
            const double halfvar = prepare_sort(&bv[bi], achv);
            double lowervar=0;

            // hist_item_sort_halfvar sorts and sums lowervar at the same time
            // returns item to break at …minus one, which does smell like an off-by-one error.
            hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar);
            unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1);

            /*
            ** Split the box.  The lower half keeps the weight accumulated
            ** below the break point; the new box gets the remainder.
            */
            double sm = bv[bi].sum;
            double lowersum = 0;
            for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight;

            box_init(&bv[bi], achv, indx, break_at, lowersum);
            box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum);

            ++boxes;

            // stop early once the (lazily computed) total error is under target
            if (total_box_error_below_target(target_mse, bv, boxes, hist)) {
                break;
            }
        }
    }

    colormap *map = pam_colormap(boxes, malloc, free);
    set_colormap_from_boxes(map, bv, boxes, achv);
    adjust_histogram(achv, bv, boxes);

    return map;
}
/*
** Ok, we've got enough boxes. Now choose a representative color for
** each box. There are a number of possible ways to make this choice.
** One would be to choose the center of the box; this ignores any structure
** within the boxes. Another method would be to average all the colors in
** the box - this is the method specified in Heckbert's paper.
*/
static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv)
{
    for(unsigned int bi = 0; bi < boxes; ++bi) {
        map->palette[bi].acolor = bv[bi].color;

        /* store total color popularity (perceptual_weight is approximation of it) */
        map->palette[bi].popularity = 0;
        const unsigned int first = bv[bi].ind, last = bv[bi].ind + bv[bi].colors;
        for(unsigned int i = first; i < last; i++) {
            map->palette[bi].popularity += achv[i].perceptual_weight;
        }
    }
}
/* Record, for every histogram entry, which colormap entry (box index) it was
   assigned to — presumably used as a starting guess by later remapping steps
   in the feedback loop; verify against callers.  NOTE(review): the old
   comment claimed this "increases histogram popularity", but the code only
   writes tmp.likely_colormap_index and modifies no weights.  */
static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes)
{
    for(unsigned int bi = 0; bi < boxes; ++bi) {
        for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
            achv[i].tmp.likely_colormap_index = bi;
        }
    }
}
/* Average of CLRS histogram entries, weighted by adjusted_weight.  When the
   total weight is zero the normalization step is skipped (the accumulated
   sums are returned as-is).  */
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[])
{
    double r = 0, g = 0, b = 0, a = 0, sum = 0;

    #pragma omp parallel for if (clrs > 25000) \
        schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum)
    for(unsigned int i = 0; i < clrs; i++) {
        const f_pixel px = achv[i].acolor;
        const double weight = achv[i].adjusted_weight;
        sum += weight;
        a += px.a * weight;
        r += px.r * weight;
        g += px.g * weight;
        b += px.b * weight;
    }

    if (sum) {
        a /= sum;
        r /= sum;
        g /= sum;
        b /= sum;
    }

    assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a));

    return (f_pixel){.r=r, .g=g, .b=b, .a=a};
}
|
sp.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - SP
This benchmark is an OpenMP C version of the NPB SP code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: R. Van der Wijngaart
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "../math/nas_math.h"
#include "../paging_benchmark.h"
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
/* global variables */
#include "header.h"
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void ninvr(void);
static void pinvr(void);
static void compute_rhs(void);
static void set_constants(void);
static void txinvr(void);
static void tzetar(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void y_solve(void);
static void z_solve(void);
/*--------------------------------------------------------------------
      program SP
c-------------------------------------------------------------------*/
/* Shell entry point (defined below) and its command registration with the
   Nautilus shell.  */
static int program_SP(char *_buf, void* _priv);

static struct shell_cmd_impl nas_sp_impl = {
  .cmd = "nas-sp",
  .help_str = "NAS parallel benchmark SP",
  .handler = program_SP,
};
nk_register_shell_cmd(nas_sp_impl);
#ifdef NAUT_CONFIG_ASPACE_PAGING
/* Run the SP benchmark inside a paging address space via paging_wrapper.  */
int program_SP_paging(char * _buf, void *_priv){
  return paging_wrapper(_buf, _priv, &program_SP);
}

/* Registration of the paging variant.  Renamed from "nas_is_paging_impl" —
   a copy-paste leftover from the IS benchmark — for consistency with
   nas_sp_impl above; the identifier is file-local and only referenced here. */
static struct shell_cmd_impl nas_sp_paging_impl = {
  .cmd = "nas-sp-paging",
  .help_str = "NAS parallel benchmark SP with paging",
  .handler = program_SP_paging,
};
nk_register_shell_cmd(nas_sp_paging_impl);
#endif
/* Benchmark driver: set up the problem, run NITER ADI time steps, verify the
   result, and print timing/MFLOPS.  _buf/_priv are the shell-command
   arguments (unused).  Always returns 0.  */
int program_SP(char* _buf, void * _priv) {
  int niter, step;
  double mflops, tmax;
  int nthreads = 1;
  boolean verified;
  char class;
  //FILE *fp;

/*--------------------------------------------------------------------
c      Read input file (if it exists), else take
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - SP Benchmark\n\n");

  /* File input is disabled in this kernel port; compiled defaults are used. */
  /*  fp = fopen("inputsp.data", "r"); */
  /*  if (fp != NULL) { */
  /*    printf(" Reading from input file inputsp.data\n"); */
  /*    fscanf(fp, "%d", &niter); */
  /*    while (fgetc(fp) != '\n'); */
  /*    fscanf(fp, "%lf", &dt); */
  /*    while (fgetc(fp) != '\n'); */
  /*    fscanf(fp, "%d%d%d", */
  /*	   &grid_points[0],  &grid_points[1],  &grid_points[2]); */
  /*    fclose(fp); */
  /*  } else { */
  /*    printf(" No input file inputsp.data. Using compiled defaults"); */
  niter = NITER_DEFAULT;
  dt = DT_DEFAULT;
  grid_points[0] = PROBLEM_SIZE;
  grid_points[1] = PROBLEM_SIZE;
  grid_points[2] = PROBLEM_SIZE;
  //  }

  printf(" Size: %3dx%3dx%3d\n",
	 grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d   dt: %10.6f\n", niter, dt);

  /* Sanity check against the statically sized arrays. */
  if ( (grid_points[0] > IMAX) ||
       (grid_points[1] > JMAX) ||
       (grid_points[2] > KMAX) ) {
    printf("%d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();
  initialize();
  lhsinit();
  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();

  /* Timed region: NITER ADI steps. */
  timer_clear(1);
  timer_start(1);

  for (step = 1; step <= niter; step++) {
    if (step % 20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }
    adi();
  }

  /* Query the OpenMP thread count for the results report. */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
  nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &class, &verified);

  /* MFLOPS from the NPB SP operation-count polynomial in PROBLEM_SIZE. */
  if (tmax != 0) {
    mflops = ( 881.174 * ((double)PROBLEM_SIZE*PROBLEM_SIZE*PROBLEM_SIZE)
	       - 4683.91 * pow2((double)PROBLEM_SIZE)
	       + 11484.5 * (double)PROBLEM_SIZE
	       - 19272.4) * (double)niter / (tmax*1000000.0);
  } else {
    mflops = 0.0;
  }

  c_print_results("SP", class, grid_points[0],
		  grid_points[1], grid_points[2], niter, nthreads,
		  tmax, mflops, "          floating point",
		  verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5,
		  CS6, "(none)");

  return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void add(void) {
/*--------------------------------------------------------------------
c     addition of update to the vector u:  u := u + rhs
c-------------------------------------------------------------------*/
  int m;
#pragma omp for
  for (m = 0; m < 5; m++) {
    int i, j, k;
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  u[m][i][j][k] += rhs[m][i][j][k];
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
/*--------------------------------------------------------------------
c  One complete time step: compute the right-hand side, apply txinvr,
c  perform the x/y/z directional solves, then accumulate the update
c  into u via add().  The call order is significant.
c-------------------------------------------------------------------*/
  compute_rhs();
  txinvr();
  x_solve();
  y_solve();
  z_solve();
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
/*--------------------------------------------------------------------
c this function computes the norm of the difference between the
c computed solution and the exact solution
c-------------------------------------------------------------------*/
  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  /* accumulate squared differences against the analytic solution */
  for (i = 0; i <= grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k <= grid_points[2]-1; k++) {
	zeta = (double)k * dnzm1;
	exact_solution(xi, eta, zeta, u_exact);
	for (m = 0; m < 5; m++) {
	  diff = u[m][i][j][k] - u_exact[m];
	  rms[m] += diff*diff;
	}
      }
    }
  }

  /* normalize by the interior extent of each dimension, then take sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
/*--------------------------------------------------------------------
c  computes the RMS norm of the residual (rhs) over the interior grid
c-------------------------------------------------------------------*/
  int i, j, k, d, m;
  double term;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 0; i <= grid_points[0]-2; i++) {
    for (j = 0; j <= grid_points[1]-2; j++) {
      for (k = 0; k <= grid_points[2]-2; k++) {
	for (m = 0; m < 5; m++) {
	  term = rhs[m][i][j][k];
	  rms[m] += term*term;
	}
      }
    }
  }

  /* normalize by the interior extent of each dimension, then take sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c
c For each coordinate direction the routine evaluates the exact
c solution along a 1-D "pencil" of the grid, forms flux differences
c plus fourth-order dissipation, and accumulates them into forcing.
c The final loop negates the accumulated result.
c
c Scratch pencils (file-scope arrays, indexed by the pencil
c coordinate):
c   ue[m][.]  - exact solution components along the pencil
c   buf[m][.] - for m=1..4: dtemp[m]/density; buf[0] holds the
c               sum of squared velocity components
c   cuf[.]    - square of the pencil-direction velocity component
c   q[.]      - 0.5 * (u*ue1 + v*ue2 + w*ue3) kinetic-energy term
c
c NOTE(review): because ue/buf/cuf/q are shared file-scope scratch,
c this routine must not be executed by multiple threads at once --
c confirm it is only called serially.
c-------------------------------------------------------------------*/
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c initialize the forcing term to zero; it is accumulated below
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (i = 0; i <= grid_points[0]-1; i++) {
      for (j = 0; j <= grid_points[1]-1; j++) {
        for (k= 0; k <= grid_points[2]-1; k++) {
          forcing[m][i][j][k] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)j * dnym1;
      /* fill the pencil scratch arrays along i, including boundaries */
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][i] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];  /* reciprocal density */
        for (m = 1; m < 5; m++) {
          buf[m][i] = dtpp * dtemp[m];
        }
        cuf[i] = buf[1][i] * buf[1][i];
        buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i];
        q[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i]
                      + buf[3][i]*ue[3][i]);
      }
      /* central flux differences + second-difference viscous terms */
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          tx2*( ue[1][ip1]-ue[1][im1] )+
          dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-
                   (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+
          xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+
          dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+
          xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+
          dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+
          xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+
          dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-
                 buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+
          0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+
                      buf[0][im1])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+
          dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);
      }
/*--------------------------------------------------------------------
c Fourth-order dissipation
c (one-sided stencils at i=1,2 and the two points next to the far
c boundary; the full 5-point stencil in between)
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);
        i = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][i-1] + 6.0*ue[m][i] -
           4.0*ue[m][i+1] + ue[m][i+2]);
      }
      for (m = 0; m < 5; m++) {
        for (i = 3; i <= grid_points[0]-4; i++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][i-2] - 4.0*ue[m][i-1] +
             6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][i-2] - 4.0*ue[m][i-1] +
           6.0*ue[m][i] - 4.0*ue[m][i+1]);
        i = grid_points[0]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);
      }
    }
  }

/*--------------------------------------------------------------------
c eta-direction flux differences
c (same structure as above, pencil runs along j; buf[2] now plays
c the role of the pencil-direction velocity)
c-------------------------------------------------------------------*/
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;
      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][j] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[m][j] = dtpp * dtemp[m];
        }
        cuf[j] = buf[2][j] * buf[2][j];
        buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] +
          buf[3][j] * buf[3][j];
        q[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +
                    buf[3][j]*ue[3][j]);
      }
      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          ty2*( ue[2][jp1]-ue[2][jm1] )+
          dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+
          yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+
          dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-
                 (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+
          yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+
          dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+
          yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+
          dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-
                 buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+
          0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+
                      buf[0][jm1])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+
          dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);
      }
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);
        j = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][j-1] + 6.0*ue[m][j] -
           4.0*ue[m][j+1] + ue[m][j+2]);
      }
      for (m = 0; m < 5; m++) {
        for (j = 3; j <= grid_points[1]-4; j++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][j-2] - 4.0*ue[m][j-1] +
             6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][j-2] - 4.0*ue[m][j-1] +
           6.0*ue[m][j] - 4.0*ue[m][j+1]);
        j = grid_points[1]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);
      }
    }
  }

/*--------------------------------------------------------------------
c zeta-direction flux differences
c (pencil runs along k; buf[3] is the pencil-direction velocity)
c-------------------------------------------------------------------*/
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)j * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;
      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][k] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[m][k] = dtpp * dtemp[m];
        }
        cuf[k] = buf[3][k] * buf[3][k];
        buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] +
          buf[2][k] * buf[2][k];
        q[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +
                    buf[3][k]*ue[3][k]);
      }
      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          tz2*( ue[3][kp1]-ue[3][km1] )+
          dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+
          zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+
          dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+
          zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+
          dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-
                   (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+
          zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+
          dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-
                   buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+
          0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]
                      +buf[0][km1])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+
          dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);
      }
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);
        k = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][k-1] + 6.0*ue[m][k] -
           4.0*ue[m][k+1] + ue[m][k+2]);
      }
      for (m = 0; m < 5; m++) {
        for (k = 3; k <= grid_points[2]-4; k++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][k-2] - 4.0*ue[m][k-1] +
             6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][k-2] - 4.0*ue[m][k-1] +
           6.0*ue[m][k] - 4.0*ue[m][k+1]);
        k = grid_points[2]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);
      }
    }
  }

/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c so that rhs = forcing cancels the discrete operator applied to the
c exact solution on the interior points
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
			   double dtemp[5]) {

/*--------------------------------------------------------------------
c evaluate the analytical solution at the point (xi, eta, zeta).
c For each of the five flow variables the solution is a constant term
c plus one quartic polynomial per coordinate direction, with
c coefficients taken from the global table ce; each polynomial is
c evaluated in Horner form.
c-------------------------------------------------------------------*/

  int m;
  double poly_xi, poly_eta, poly_zeta;

  for (m = 0; m < 5; m++) {
    poly_xi   = xi*(ce[1][m] + xi*(ce[4][m] +
                xi*(ce[7][m] + xi*ce[10][m])));
    poly_eta  = eta*(ce[2][m] + eta*(ce[5][m] +
                eta*(ce[8][m] + eta*ce[11][m])));
    poly_zeta = zeta*(ce[3][m] + zeta*(ce[6][m] +
                zeta*(ce[9][m] +
                zeta*ce[12][m])));
    /* same left-to-right summation order as the original code */
    dtemp[m] = ce[0][m] + poly_xi + poly_eta + poly_zeta;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  /* Pface[f][d][m]: exact solution component m on face f (0=low,
     1=high) of coordinate direction d (0=xi, 1=eta, 2=zeta). */
  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
  for (i = 0; i <= IMAX-1; i++) {
    for (j = 0; j <= IMAX-1; j++) {
      for (k = 0; k <= IMAX-1; k++) {
	u[0][i][j][k] = 1.0;
	u[1][i][j][k] = 0.0;
	u[2][i][j][k] = 0.0;
	u[3][i][j][k] = 0.0;
	u[4][i][j][k] = 1.0;
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
  for (i = 0; i <= grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k <= grid_points[2]-1; k++) {
	zeta = (double)k * dnzm1;
        /* sample the exact solution on the two opposing faces of
           each direction ((double)ix is 0.0 or 1.0, i.e. the face
           coordinate) */
	for (ix = 0; ix < 2; ix++) {
	  exact_solution((double)ix, eta, zeta,
			 &Pface[ix][0][0]);
	}
	for (iy = 0; iy < 2; iy++) {
	  exact_solution(xi, (double)iy , zeta,
			 &Pface[iy][1][0]);
	}
	for (iz = 0; iz < 2; iz++) {
	  exact_solution(xi, eta, (double)iz,
			 &Pface[iz][2][0]);
	}
        /* tri-linear transfinite interpolation of the face values */
	for (m = 0; m < 5; m++) {
	  Pxi = xi * Pface[1][0][m] +
	    (1.0-xi) * Pface[0][0][m];
	  Peta = eta * Pface[1][1][m] +
	    (1.0-eta) * Pface[0][1][m];
	  Pzeta = zeta * Pface[1][2][m] +
	    (1.0-zeta) * Pface[0][2][m];
	  u[m][i][j][k] = Pxi + Peta + Pzeta -
	    Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
	    Pxi*Peta*Pzeta;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c (each face pass below overwrites the interpolated values on that
c face; later passes overwrite shared edges/corners of earlier ones)
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
  xi = 0.0;
  i = 0;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
  xi = 1.0;
  i = grid_points[0]-1;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
  eta = 0.0;
  j = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
  eta = 1.0;
  j = grid_points[1]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
  zeta = 0.0;
  k = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
  zeta = 1.0;
  k = grid_points[2]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[m][i][j][k] = temp[m];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {

/*--------------------------------------------------------------------
c reset the left hand side arrays: zero all 15 planes (3 factors x
c 5 pentadiagonal bands), then put 1.0 on the main diagonal of each
c factor.  The "#pragma omp for" directives here are orphaned
c worksharing constructs -- this routine relies on being called from
c inside an enclosing "omp parallel" region.
c-------------------------------------------------------------------*/

  int i, j, k, n;

/*--------------------------------------------------------------------
c zap the whole left hand side for starters
c-------------------------------------------------------------------*/
  for (n = 0; n < 15; n++) {
#pragma omp for nowait
    for (i = 0; i < grid_points[0]; i++) {
      for (j = 0; j < grid_points[1]; j++) {
	for (k = 0; k < grid_points[2]; k++) {
	  lhs[n][i][j][k] = 0.0;
	}
      }
    }
  }
  /* all threads must finish zeroing (nowait above) before any
     thread writes the diagonals below */
#pragma omp barrier

/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but
c convenient.  Band 2 of each 5-band factor (index 5*n+2) is the
c main diagonal.
c-------------------------------------------------------------------*/
  for (n = 0; n < 3; n++) {
#pragma omp for
    for (i = 0; i < grid_points[0]; i++) {
      for (j = 0; j < grid_points[1]; j++) {
	for (k = 0; k < grid_points[2]; k++) {
	  lhs[5*n+2][i][j][k] = 1.0;
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three x-factors.
c lhs[0..4] is the pentadiagonal factor for the u eigenvalue;
c lhs[5..9] and lhs[10..14] are the (u+c) and (u-c) factors, built
c from the first by adding/subtracting the sound-speed term.
c The "#pragma omp for" directives are orphaned worksharing
c constructs: this routine must be called from inside an enclosing
c "omp parallel" region.
c-------------------------------------------------------------------*/
  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
  for (j = 1; j <= grid_points[1]-2; j++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* cv: x-velocity pencil; rhon: max of the viscous eigenvalue
         bounds used for the diagonal dominance terms */
#pragma omp for
      for (i = 0; i <= grid_points[0]-1; i++) {
	ru1 = c3c4*rho_i[i][j][k];
	cv[i] = us[i][j][k];
	rhon[i] = max(dx2+con43*ru1,
		      max(dx5+c1c5*ru1,
			  max(dxmax+ru1,
			      dx1)));
      }
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
	lhs[0][i][j][k] = 0.0;
	lhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1];
	lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];
	lhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];
	lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c add fourth order dissipation
c (one-sided stencil corrections at i=1,2 and at the two points next
c to the far boundary; full 5-point stencil in between)
c-------------------------------------------------------------------*/
  i = 1;
#pragma omp for nowait
  for (j = 1; j <= grid_points[1]-2; j++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
      lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;
      lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;
      lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;
    }
  }
#pragma omp for nowait
  for (i = 3; i <= grid_points[0]-4; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
	lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
	lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
	lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
	lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }
  i = grid_points[0]-3;
  /* last "omp for" has no nowait: implicit barrier before the
     dependent copy loop below */
#pragma omp for
  for (j = 1; j <= grid_points[1]-2; j++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;
      lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
      lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;
    }
  }

/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c) by adding to
c the first
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0+5][i][j][k] = lhs[0][i][j][k];
	lhs[1+5][i][j][k] = lhs[1][i][j][k] -
	  dttx2 * speed[i-1][j][k];
	lhs[2+5][i][j][k] = lhs[2][i][j][k];
	lhs[3+5][i][j][k] = lhs[3][i][j][k] +
	  dttx2 * speed[i+1][j][k];
	lhs[4+5][i][j][k] = lhs[4][i][j][k];
	lhs[0+10][i][j][k] = lhs[0][i][j][k];
	lhs[1+10][i][j][k] = lhs[1][i][j][k] +
	  dttx2 * speed[i-1][j][k];
	lhs[2+10][i][j][k] = lhs[2][i][j][k];
	lhs[3+10][i][j][k] = lhs[3][i][j][k] -
	  dttx2 * speed[i+1][j][k];
	lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors:
c lhs[0..4] for the v eigenvalue, lhs[5..9] for (v+c), lhs[10..14]
c for (v-c).  Structure mirrors lhsx() with the pencil along j.
c The "#pragma omp for" directives are orphaned worksharing
c constructs: this routine must be called from inside an enclosing
c "omp parallel" region.
c-------------------------------------------------------------------*/
  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* cv: y-velocity pencil; rhoq: viscous eigenvalue bound */
#pragma omp for
      for (j = 0; j <= grid_points[1]-1; j++) {
	ru1 = c3c4*rho_i[i][j][k];
	cv[j] = vs[i][j][k];
	rhoq[j] = max(dy3 + con43 * ru1,
		      max(dy5 + c1c5*ru1,
			  max(dymax + ru1,
			      dy1)));
      }
#pragma omp for
      for (j = 1; j <= grid_points[1]-2; j++) {
	lhs[0][i][j][k] = 0.0;
	lhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
	lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];
	lhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
	lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c add fourth order dissipation
c (one-sided corrections near both j boundaries, full stencil inside)
c-------------------------------------------------------------------*/
  j = 1;
#pragma omp for nowait
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
      lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;
      lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;
      lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;
    }
  }
#pragma omp for nowait
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 3; j <= grid_points[1]-4; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
	lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
	lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
	lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
	lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }
  j = grid_points[1]-3;
  /* no nowait here: implicit barrier before the copy loop below */
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;
      lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
      lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;
    }
  }

/*--------------------------------------------------------------------
c subsequently, do the other two factors
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0+5][i][j][k] = lhs[0][i][j][k];
	lhs[1+5][i][j][k] = lhs[1][i][j][k] -
	  dtty2 * speed[i][j-1][k];
	lhs[2+5][i][j][k] = lhs[2][i][j][k];
	lhs[3+5][i][j][k] = lhs[3][i][j][k] +
	  dtty2 * speed[i][j+1][k];
	lhs[4+5][i][j][k] = lhs[4][i][j][k];
	lhs[0+10][i][j][k] = lhs[0][i][j][k];
	lhs[1+10][i][j][k] = lhs[1][i][j][k] +
	  dtty2 * speed[i][j-1][k];
	lhs[2+10][i][j][k] = lhs[2][i][j][k];
	lhs[3+10][i][j][k] = lhs[3][i][j][k] -
	  dtty2 * speed[i][j+1][k];
	lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors:
c lhs[0..4] for the w eigenvalue, lhs[5..9] for (w+c), lhs[10..14]
c for (w-c).  Structure mirrors lhsx()/lhsy() with the pencil along k.
c The "#pragma omp for" directives are orphaned worksharing
c constructs: this routine must be called from inside an enclosing
c "omp parallel" region.
c-------------------------------------------------------------------*/
  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      /* cv: z-velocity pencil; rhos: viscous eigenvalue bound */
#pragma omp for
      for (k = 0; k <= grid_points[2]-1; k++) {
	ru1 = c3c4*rho_i[i][j][k];
	cv[k] = ws[i][j][k];
	rhos[k] = max(dz4 + con43 * ru1,
		      max(dz5 + c1c5 * ru1,
			  max(dzmax + ru1,
			      dz1)));
      }
#pragma omp for
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0][i][j][k] = 0.0;
	lhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
	lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k];
	lhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
	lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c add fourth order dissipation
c (one-sided corrections near both k boundaries, full stencil inside)
c-------------------------------------------------------------------*/
  k = 1;
#pragma omp for nowait
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
      lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;
      lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;
      lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;
    }
  }
#pragma omp for nowait
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 3; k <= grid_points[2]-4; k++) {
	lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
	lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
	lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
	lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
	lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }
  k = grid_points[2]-3;
  /* no nowait here: implicit barrier before the copy loop below */
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;
      lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
      lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;
    }
  }

/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	lhs[0+5][i][j][k] = lhs[0][i][j][k];
	lhs[1+5][i][j][k] = lhs[1][i][j][k] -
	  dttz2 * speed[i][j][k-1];
	lhs[2+5][i][j][k] = lhs[2][i][j][k];
	lhs[3+5][i][j][k] = lhs[3][i][j][k] +
	  dttz2 * speed[i][j][k+1];
	lhs[4+5][i][j][k] = lhs[4][i][j][k];
	lhs[0+10][i][j][k] = lhs[0][i][j][k];
	lhs[1+10][i][j][k] = lhs[1][i][j][k] +
	  dttz2 * speed[i][j][k-1];
	lhs[2+10][i][j][k] = lhs[2][i][j][k];
	lhs[3+10][i][j][k] = lhs[3][i][j][k] -
	  dttz2 * speed[i][j][k+1];
	lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ninvr(void) {

/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: apply a constant
c 5x5 transform to the rhs vector at every interior grid point.
c Each point is independent, so the outer loop is parallelized.
c-------------------------------------------------------------------*/

  int i, j, k;
  double v0, v1, v2, v3, v4, scaled, avg;

#pragma omp parallel for default(shared) private(i,j,k,v0,v1,v2,v3,v4,scaled,avg)
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load the five components once */
        v0 = rhs[0][i][j][k];
        v1 = rhs[1][i][j][k];
        v2 = rhs[2][i][j][k];
        v3 = rhs[3][i][j][k];
        v4 = rhs[4][i][j][k];
        scaled = bt * v2;            /* bt-scaled third component */
        avg    = 0.5 * ( v3 + v4 );  /* mean of the last two */
        rhs[0][i][j][k] = -v1;
        rhs[1][i][j][k] = v0;
        rhs[2][i][j][k] = bt * ( v3 - v4 );
        rhs[3][i][j][k] = -scaled + avg;
        rhs[4][i][j][k] = scaled + avg;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pinvr(void) {

/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: apply a constant
c 5x5 transform to the rhs vector at every interior grid point.
c Same structure as ninvr() but with the bt scaling taken from the
c first component.  Points are independent; outer loop parallelized.
c-------------------------------------------------------------------*/

  int i, j, k;
  double v0, v1, v2, v3, v4, scaled, avg;

#pragma omp parallel for default(shared) private(i,j,k,v0,v1,v2,v3,v4,scaled,avg)
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load the five components once */
        v0 = rhs[0][i][j][k];
        v1 = rhs[1][i][j][k];
        v2 = rhs[2][i][j][k];
        v3 = rhs[3][i][j][k];
        v4 = rhs[4][i][j][k];
        scaled = bt * v0;            /* bt-scaled first component */
        avg    = 0.5 * ( v3 + v4 );  /* mean of the last two */
        rhs[0][i][j][k] = bt * ( v3 - v4 );
        rhs[1][i][j][k] = -v2;
        rhs[2][i][j][k] = v1;
        rhs[3][i][j][k] = -scaled + avg;
        rhs[4][i][j][k] = scaled + avg;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
int i, j, k, m;
double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1,
wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rho_inv = 1.0/u[0][i][j][k];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[1][i][j][k] * rho_inv;
vs[i][j][k] = u[2][i][j][k] * rho_inv;
ws[i][j][k] = u[3][i][j][k] * rho_inv;
square[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] +
u[2][i][j][k]*u[2][i][j][k] +
u[3][i][j][k]*u[3][i][j][k] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
/*--------------------------------------------------------------------
c (do not need speed and ainx until the lhs computation)
c-------------------------------------------------------------------*/
aux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);
aux = sqrt(aux);
speed[i][j][k] = aux;
ainv[i][j][k] = 1.0/aux;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rhs[m][i][j][k] = forcing[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 *
(u[0][i+1][j][k] - 2.0*u[0][i][j][k] +
u[0][i-1][j][k]) -
tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 *
(u[1][i+1][j][k] - 2.0*u[1][i][j][k] +
u[1][i-1][j][k]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[1][i+1][j][k]*up1 -
u[1][i-1][j][k]*um1 +
(u[4][i+1][j][k]- square[i+1][j][k]-
u[4][i-1][j][k]+ square[i-1][j][k])*
c2);
rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 *
(u[2][i+1][j][k] - 2.0*u[2][i][j][k] +
u[2][i-1][j][k]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[2][i+1][j][k]*up1 -
u[2][i-1][j][k]*um1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 *
(u[3][i+1][j][k] - 2.0*u[3][i][j][k] +
u[3][i-1][j][k]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[3][i+1][j][k]*up1 -
u[3][i-1][j][k]*um1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 *
(u[4][i+1][j][k] - 2.0*u[4][i][j][k] +
u[4][i-1][j][k]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i-1][j][k]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[4][i+1][j][k] -
c2*square[i+1][j][k])*up1 -
(c1*u[4][i-1][j][k] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
i = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k]);
}
}
}
i = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k] );
}
}
}
}
i = grid_points[0]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );
}
}
}
i = grid_points[0]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
5.0*u[m][i][j][k] );
}
}
}
#pragma omp barrier
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 *
(u[0][i][j+1][k] - 2.0*u[0][i][j][k] +
u[0][i][j-1][k]) -
ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 *
(u[1][i][j+1][k] - 2.0*u[1][i][j][k] +
u[1][i][j-1][k]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[1][i][j+1][k]*vp1 -
u[1][i][j-1][k]*vm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 *
(u[2][i][j+1][k] - 2.0*u[2][i][j][k] +
u[2][i][j-1][k]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[2][i][j+1][k]*vp1 -
u[2][i][j-1][k]*vm1 +
(u[4][i][j+1][k] - square[i][j+1][k] -
u[4][i][j-1][k] + square[i][j-1][k])
*c2);
rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 *
(u[3][i][j+1][k] - 2.0*u[3][i][j][k] +
u[3][i][j-1][k]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[3][i][j+1][k]*vp1 -
u[3][i][j-1][k]*vm1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 *
(u[4][i][j+1][k] - 2.0*u[4][i][j][k] +
u[4][i][j-1][k]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j-1][k]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[4][i][j+1][k] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[4][i][j-1][k] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
j = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k]);
}
}
}
j = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k] );
}
}
}
}
j = grid_points[1]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );
}
}
}
j = grid_points[1]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
5.0*u[m][i][j][k] );
}
}
}
#pragma omp barrier
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 *
(u[0][i][j][k+1] - 2.0*u[0][i][j][k] +
u[0][i][j][k-1]) -
tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 *
(u[1][i][j][k+1] - 2.0*u[1][i][j][k] +
u[1][i][j][k-1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[1][i][j][k+1]*wp1 -
u[1][i][j][k-1]*wm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 *
(u[2][i][j][k+1] - 2.0*u[2][i][j][k] +
u[2][i][j][k-1]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[2][i][j][k+1]*wp1 -
u[2][i][j][k-1]*wm1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 *
(u[3][i][j][k+1] - 2.0*u[3][i][j][k] +
u[3][i][j][k-1]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[3][i][j][k+1]*wp1 -
u[3][i][j][k-1]*wm1 +
(u[4][i][j][k+1] - square[i][j][k+1] -
u[4][i][j][k-1] + square[i][j][k-1])
*c2);
rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 *
(u[4][i][j][k+1] - 2.0*u[4][i][j][k] +
u[4][i][j][k-1]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j][k-1]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[4][i][j][k+1] -
c2*square[i][j][k+1])*wp1 -
(c1*u[4][i][j][k-1] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
k = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2]);
}
}
}
k = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 3*1; k <= grid_points[2]-3*1-1; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2] );
}
}
}
}
k = grid_points[2]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );
}
}
}
k = grid_points[2]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
5.0*u[m][i][j][k] );
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Initialize every global problem constant: the ce coefficient table,
c the fluid constants c1..c5, the mesh spacings derived from
c grid_points[], the per-direction dissipation coefficients, and the
c many precomputed products (dt*t?1, dx?tx1, xxcon*, comz*, ...) that
c the flux, dissipation and solver loops read.  Must be called after
c grid_points[] and dt have been set, since several values are
c derived from them.  No parameters, no return value; writes only
c file-scope globals.
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* ce[0..12][0..4]: coefficient table, one column per equation
   component -- presumably consumed when building the analytic
   (exact) solution and forcing terms; confirm against the
   exact_solution()/exact_rhs() callers, which are outside this
   chunk. */
ce[0][0] = 2.0;
ce[1][0] = 0.0;
ce[2][0] = 0.0;
ce[3][0] = 4.0;
ce[4][0] = 5.0;
ce[5][0] = 3.0;
ce[6][0] = 0.5;
ce[7][0] = 0.02;
ce[8][0] = 0.01;
ce[9][0] = 0.03;
ce[10][0] = 0.5;
ce[11][0] = 0.4;
ce[12][0] = 0.3;
ce[0][1] = 1.0;
ce[1][1] = 0.0;
ce[2][1] = 0.0;
ce[3][1] = 0.0;
ce[4][1] = 1.0;
ce[5][1] = 2.0;
ce[6][1] = 3.0;
ce[7][1] = 0.01;
ce[8][1] = 0.03;
ce[9][1] = 0.02;
ce[10][1] = 0.4;
ce[11][1] = 0.3;
ce[12][1] = 0.5;
ce[0][2] = 2.0;
ce[1][2] = 2.0;
ce[2][2] = 0.0;
ce[3][2] = 0.0;
ce[4][2] = 0.0;
ce[5][2] = 2.0;
ce[6][2] = 3.0;
ce[7][2] = 0.04;
ce[8][2] = 0.03;
ce[9][2] = 0.05;
ce[10][2] = 0.3;
ce[11][2] = 0.5;
ce[12][2] = 0.4;
ce[0][3] = 2.0;
ce[1][3] = 2.0;
ce[2][3] = 0.0;
ce[3][3] = 0.0;
ce[4][3] = 0.0;
ce[5][3] = 2.0;
ce[6][3] = 3.0;
ce[7][3] = 0.03;
ce[8][3] = 0.05;
ce[9][3] = 0.04;
ce[10][3] = 0.2;
ce[11][3] = 0.1;
ce[12][3] = 0.3;
ce[0][4] = 5.0;
ce[1][4] = 4.0;
ce[2][4] = 3.0;
ce[3][4] = 2.0;
ce[4][4] = 0.1;
ce[5][4] = 0.4;
ce[6][4] = 0.3;
ce[7][4] = 0.05;
ce[8][4] = 0.04;
ce[9][4] = 0.03;
ce[10][4] = 0.1;
ce[11][4] = 0.3;
ce[12][4] = 0.2;
/* fluid constants: c1 = 1.4 and c2 = c1 - 1.0 are the values of the
   ratio of specific heats (gamma) and gamma-1 for air -- presumably
   that is their meaning here; the remaining c3..c5 are benchmark
   parameters. */
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
bt = sqrt(0.5);
/* mesh spacing in each direction: 1/(number of intervals) */
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
/* frequently used products of the fluid constants */
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
/* difference-operator scale factors: t?1 = 1/h^2 (second
   difference), t?2 = 1/(2h) (centered first difference), t?3 = 1/h */
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
/* per-direction, per-component diffusion coefficients */
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
/* strength of the fourth-order artificial dissipation used in the
   rhs routines */
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
/* time-step-scaled difference factors */
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
/* dissipation multiples used when assembling the lhs operators */
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
/* viscous-flux constants per direction (see the xxcon*/yycon*/zzcon*
   uses in the compute_rhs flux loops) */
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void txinvr(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication
--------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3,
r4, r5, ac2inv;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
ru1 = rho_i[i][j][k];
uu = us[i][j][k];
vv = vs[i][j][k];
ww = ws[i][j][k];
ac = speed[i][j][k];
ac2inv = ainv[i][j][k]*ainv[i][j][k];
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 -
vv*r3 - ww*r4 + r5 );
t2 = bt * ru1 * ( uu * r1 - r2 );
t3 = ( bt * ru1 * ac ) * t1;
rhs[0][i][j][k] = r1 - t1;
rhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );
rhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );
rhs[3][i][j][k] = - t2 + t3;
rhs[4][i][j][k] = t2 + t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void tzetar(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: at every interior grid
c point, replace the five rhs components by their product with a 5x5
c matrix assembled from the local flow state (us/vs/ws velocities,
c sound speed, inverse sound speed, density u[0]).  Points are
c independent, so the i-loop is a plain worksharing loop.
c
c Fix: `acinv` was the only per-iteration scratch variable missing
c from the private() clause.  All of these are function locals (and
c thus already thread-private when the function is invoked from
c inside a parallel region), but listing every scratch variable keeps
c the clause correct even for an orphaned worksharing construct with
c shared enclosing locals, and consistent with its evident intent.
--------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3,
r4, r5, btuz, acinv, ac2u, uzik1;
#pragma omp for private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1)
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* per-point flow quantities */
xvel = us[i][j][k];
yvel = vs[i][j][k];
zvel = ws[i][j][k];
ac = speed[i][j][k];
acinv = ainv[i][j][k];
ac2u = ac*ac;
/* snapshot all five rhs components before overwriting any */
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
uzik1 = u[0][i][j][k];
btuz = bt * uzik1;
t1 = btuz*acinv * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[0][i][j][k] = t2;
rhs[1][i][j][k] = -uzik1*r2 + xvel*t2;
rhs[2][i][j][k] = uzik1*r1 + yvel*t2;
rhs[3][i][j][k] = zvel*t2 + t3;
rhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) +
qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Verification routine.
c
c   no_time_steps : number of iterations the run performed; together
c                   with grid_points[] it selects the problem class.
c   class (out)   : set to the matched class letter ('S','W','A','B',
c                   'C'), or 'U' when the grid/steps combination (or
c                   the dt value) matches no known reference case.
c   verified (out): TRUE iff a reference case matched, dt matched its
c                   reference value, and every residual and solution
c                   error norm agrees with the reference to within
c                   epsilon (relative).
c
c Computes the solution-error and residual norms (the residual is
c recomputed via compute_rhs and unscaled by dt, since the stored rhs
c was multiplied by dt), compares them against hard-coded reference
c values for the known grid sizes, and prints a per-component report.
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *class, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
--------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* undo the dt scaling applied at the end of compute_rhs's caller
   pipeline so xcr is comparable with the reference residual norms */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
/* default: unknown class; assume success until a comparison fails */
*class = 'U';
*verified = TRUE;
/* placeholder reference norms of 1.0 keep the relative-difference
   divisions below well defined even when no class matches */
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
--------------------------------------------------------------------*/
if ( grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 100) {
*class = 'S';
dtref = 1.5e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 2.7470315451339479e-02;
xcrref[1] = 1.0360746705285417e-02;
xcrref[2] = 1.6235745065095532e-02;
xcrref[3] = 1.5840557224455615e-02;
xcrref[4] = 3.4849040609362460e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 2.7289258557377227e-05;
xceref[1] = 1.0364446640837285e-05;
xceref[2] = 1.6154798287166471e-05;
xceref[3] = 1.5750704994480102e-05;
xceref[4] = 3.4177666183390531e-05;
/*--------------------------------------------------------------------
c reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 36 &&
grid_points[1] == 36 &&
grid_points[2] == 36 &&
no_time_steps == 400) {
*class = 'W';
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.1893253733584e-02;
xcrref[1] = 0.1717075447775e-03;
xcrref[2] = 0.2778153350936e-03;
xcrref[3] = 0.2887475409984e-03;
xcrref[4] = 0.3143611161242e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.7542088599534e-04;
xceref[1] = 0.6512852253086e-05;
xceref[2] = 0.1049092285688e-04;
xceref[3] = 0.1128838671535e-04;
xceref[4] = 0.1212845639773e-03;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 400 ) {
*class = 'A';
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 2.4799822399300195;
xcrref[1] = 1.1276337964368832;
xcrref[2] = 1.5028977888770491;
xcrref[3] = 1.4217816211695179;
xcrref[4] = 2.1292113035138280;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 1.0900140297820550e-04;
xceref[1] = 3.7343951769282091e-05;
xceref[2] = 5.0092785406541633e-05;
xceref[3] = 4.7671093939528255e-05;
xceref[4] = 1.3621613399213001e-04;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 400 time steps,
c with DT = 1.0d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 400) {
*class = 'B';
dtref = 1.0e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.6903293579998e+02;
xcrref[1] = 0.3095134488084e+02;
xcrref[2] = 0.4103336647017e+02;
xcrref[3] = 0.3864769009604e+02;
xcrref[4] = 0.5643482272596e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.9810006190188e-02;
xceref[1] = 0.1022827905670e-02;
xceref[2] = 0.1720597911692e-02;
xceref[3] = 0.1694479428231e-02;
xceref[4] = 0.1847456263981e-01;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 400 time steps,
c with DT = 0.67d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 400) {
*class = 'C';
dtref = 0.67e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.5881691581829e+03;
xcrref[1] = 0.2454417603569e+03;
xcrref[2] = 0.3293829191851e+03;
xcrref[3] = 0.3081924971891e+03;
xcrref[4] = 0.4597223799176e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.2598120500183e+00;
xceref[1] = 0.2590888922315e-01;
xceref[2] = 0.5132886416320e-01;
xceref[3] = 0.4806073419454e-01;
xceref[4] = 0.5483377491301e+00;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*class != 'U') {
printf(" Verification being performed for class %1c\n", *class);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
/* dtref is only read here, i.e. when a reference case matched and
   set it above -- never while *class is 'U' */
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*class = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Solve the factored system along x-lines: a scalar penta-diagonal
c Thomas (forward elimination + backsubstitution) applied to all five
c rhs components along every (j,k) line, followed by the
c block-diagonal inversion ninvr().  The lhs bands are laid out as
c lhs[n+0..n+4] = the five diagonals of the operator, with lhs[n+2]
c the main diagonal (it is the pivot divided by below); n selects the
c factor: n = 0 for the three identical factors (components 0..2),
c n = 5 and n = 10 for the u+c and u-c factors (components 3 and 4).
c No parameters, no return value; reads and writes the global lhs and
c rhs arrays.  NOTE(review): lhs is presumably filled by lhsx() just
c below -- lhsx() is outside this chunk, confirm the band layout
c there.
--------------------------------------------------------------------*/
static void x_solve(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the x-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the x-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
int i, j, k, n, i1, i2, m;
double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
lhsx();
/*--------------------------------------------------------------------
c perform the Thomas algorithm; first, FORWARD ELIMINATION
--------------------------------------------------------------------*/
/* n = 0: the first three components share one factor, so they are
   eliminated together (the inner m-loops run over m = 0..2) */
n = 0;
/* the i-loop carries the recurrence, so it stays sequential; the
   (j,k) lines are independent and are shared across threads */
for (i = 0; i <= grid_points[0]-3; i++) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* scale row i by the pivot, then eliminate the sub-diagonal
   entries of rows i+1 and i+2 against it */
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
}
lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i2][j][k] = rhs[m][i2][j][k] -
lhs[n+0][i2][j][k]*rhs[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
i = grid_points[0]-2;
i1 = grid_points[0]-1;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1.0/lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
}
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i1][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
--------------------------------------------------------------------*/
/* same elimination as above, but one component at a time with its
   own lhs band set: n = 5 for m = 3 (u+c), n = 10 for m = 4 (u-c) */
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (i = 0; i <= grid_points[0]-3; i++) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
rhs[m][i2][j][k] = rhs[m][i2][j][k] -
lhs[n+0][i2][j][k]*rhs[m][i][j][k];
}
}
}
/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
i = grid_points[0]-2;
i1 = grid_points[0]-1;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i1][j][k];
rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
/* special-case row grid_points[0]-2, which has only one row above */
i = grid_points[0]-2;
i1 = grid_points[0]-1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k];
}
}
}
for (m = 3; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
n = (m-3+1)*5;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
/* sweep i downward; each row uses the two already-solved rows above */
n = 0;
for (i = grid_points[0]-3; i >= 0; i--) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (m = 0; m < 3; m++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
lhs[n+4][i][j][k]*rhs[m][i2][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (i = grid_points[0]-3; i >= 0; i--) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
lhs[n+4][i][j][k]*rhs[m][i2][j][k];
}
}
}
}
}
/*--------------------------------------------------------------------
c Do the block-diagonal inversion
--------------------------------------------------------------------*/
ninvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the y-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the y-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
int i, j, k, n, j1, j2, m;
double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
lhsy();
n = 0;
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
}
lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j2][k] = rhs[m][i][j2][k] -
lhs[n+0][i][j2][k]*rhs[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
}
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j1][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
rhs[m][i][j2][k] = rhs[m][i][j2][k] -
lhs[n+0][i][j2][k]*rhs[m][i][j][k];
}
}
}
/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j1][k];
rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k];
}
}
}
for (m = 3; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
n = (m-3+1)*5;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
n = 0;
for (m = 0; m < 3; m++) {
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
lhs[n+4][i][j][k]*rhs[m][i][j2][k];
}
}
}
}
/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
j2 = j1 + 1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
lhs[n+4][i][j][k]*rhs[m][i][j2][k];
}
}
}
}
}
pinvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* z_solve: one sweep of the approximate-factorization solve in the
   z-direction.  Each (i,j) pair owns an independent tridiagonal system
   along k; the k-recurrence itself is sequential, so OpenMP worksharing
   is applied to the outer i loops and each thread processes whole
   z-lines.  lhs rows are grouped in blocks of 5: offset n=0 serves the
   first three (identical-factor) equations, n=5 the u+c equation (m=3)
   and n=10 the u-c equation (m=4). */
static void z_solve(void) {
#pragma omp parallel
  {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the z-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the z-lines. Boundary conditions are non-periodic
c-------------------------------------------------------------------*/
    int i, j, k, n, k1, k2, m;
    double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
c-------------------------------------------------------------------*/
    lhsz();
    /* n = 0: shared factor for the first three components (m = 0..2) */
    n = 0;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 0; k <= grid_points[2]-3; k++) {
          k1 = k + 1;
          k2 = k + 2;
          /* normalize row k by its diagonal, then eliminate the
             sub-diagonal entries it induces in rows k+1 and k+2 */
          fac1 = 1./lhs[n+2][i][j][k];
          lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
          lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
          for (m = 0; m < 3; m++) {
            rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
          }
          lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
          lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
          for (m = 0; m < 3; m++) {
            rhs[m][i][j][k1] = rhs[m][i][j][k1] -
              lhs[n+1][i][j][k1]*rhs[m][i][j][k];
          }
          lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
            lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
          lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
            lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
          for (m = 0; m < 3; m++) {
            rhs[m][i][j][k2] = rhs[m][i][j][k2] -
              lhs[n+0][i][j][k2]*rhs[m][i][j][k];
          }
        }
      }
    }
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
c-------------------------------------------------------------------*/
    k = grid_points[2]-2;
    k1 = grid_points[2]-1;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        }
        lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
        lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k1] = rhs[m][i][j][k1] -
            lhs[n+1][i][j][k1]*rhs[m][i][j][k];
        }
/*--------------------------------------------------------------------
c scale the last row immediately
c-------------------------------------------------------------------*/
        fac2 = 1./lhs[n+2][i][j][k1];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
        }
      }
    }
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
c-------------------------------------------------------------------*/
    for (m = 3; m < 5; m++) {
      /* m = 3 -> n = 5 (u+c block), m = 4 -> n = 10 (u-c block) */
      n = (m-3+1)*5;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          for (k = 0; k <= grid_points[2]-3; k++) {
            k1 = k + 1;
            k2 = k + 2;
            fac1 = 1./lhs[n+2][i][j][k];
            lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
            lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
            rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
            lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
              lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
            lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
              lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
            rhs[m][i][j][k1] = rhs[m][i][j][k1] -
              lhs[n+1][i][j][k1]*rhs[m][i][j][k];
            lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
              lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
            lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
              lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
            rhs[m][i][j][k2] = rhs[m][i][j][k2] -
              lhs[n+0][i][j][k2]*rhs[m][i][j][k];
          }
        }
      }
/*--------------------------------------------------------------------
c And again the last two rows separately
c-------------------------------------------------------------------*/
      k = grid_points[2]-2;
      k1 = grid_points[2]-1;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          fac1 = 1./lhs[n+2][i][j][k];
          lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
          lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
          lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
          lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
          rhs[m][i][j][k1] = rhs[m][i][j][k1] -
            lhs[n+1][i][j][k1]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately (some of this is overkill
c if this is the last cell)
c-------------------------------------------------------------------*/
          fac2 = 1./lhs[n+2][i][j][k1];
          rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
        }
      }
    }
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
c-------------------------------------------------------------------*/
    /* seed the back substitution from the last two planes */
    k = grid_points[2]-2;
    k1 = grid_points[2]-1;
    n = 0;
    for (m = 0; m < 3; m++) {
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j][k1];
        }
      }
    }
    for (m = 3; m < 5; m++) {
      n = (m-3+1)*5;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j][k1];
        }
      }
    }
/*--------------------------------------------------------------------
c Whether or not this is the last processor, we always have
c to complete the back-substitution
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The first three factors
c-------------------------------------------------------------------*/
    n = 0;
    for (m = 0; m < 3; m++) {
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          /* k descends: each plane depends on the two planes above it */
          for (k = grid_points[2]-3; k >= 0; k--) {
            k1 = k + 1;
            k2 = k + 2;
            rhs[m][i][j][k] = rhs[m][i][j][k] -
              lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
              lhs[n+4][i][j][k]*rhs[m][i][j][k2];
          }
        }
      }
    }
/*--------------------------------------------------------------------
c And the remaining two
c-------------------------------------------------------------------*/
    for (m = 3; m < 5; m++) {
      n = (m-3+1)*5;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (j = 1; j <= grid_points[1]-2; j++) {
          for (k = grid_points[2]-3; k >= 0; k--) {
            k1 = k + 1;
            k2 = k + 2;
            rhs[m][i][j][k] = rhs[m][i][j][k] -
              lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
              lhs[n+4][i][j][k]*rhs[m][i][j][k2];
          }
        }
      }
    }
  }
  /* transform back to the primitive variables after the z sweep */
  tzetar();
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/sync.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
param.InitAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
fminmax.resize(tree.param.num_feature * 2);
std::fill(fminmax.begin(), fminmax.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
dmlc::DataIter<ColBatch>* iter = p_fmat->ColIterator();
iter->BeforeFirst();
while (iter->Next()) {
const ColBatch& batch = iter->Value();
for (bst_uint i = 0; i < batch.size; ++i) {
const bst_uint fid = batch.col_index[i];
const ColBatch::Inst& c = batch[i];
if (c.length != 0) {
fminmax[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax[fid * 2 + 0]);
fminmax[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax[fid * 2 + 1]);
}
}
}
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax), fminmax.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax[fid * 2];
bst_float b = fminmax[fid * 2 + 1];
if (a == -std::numeric_limits<bst_float>::max()) return 0;
if (-a == b) {
return 1;
} else {
return 2;
}
}
inline bst_float MaxValue(bst_uint fid) const {
return fminmax[fid *2 + 1];
}
inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
std::vector<bst_uint> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax.size(); i += 2) {
const bst_uint fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
unsigned n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
std::vector<bst_float> fminmax;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
inline static int NextLevel(const RowBatch::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.split_index();
for (unsigned i = 0; i < inst.length; ++i) {
if (findex == inst[i].index) {
if (inst[i].fvalue < n.split_cond()) {
return n.cleft();
} else {
return n.cright();
}
}
}
return n.cdefault();
}
/*! \brief get number of omp thread in current context */
inline static int get_nthread() {
int nthread;
#pragma omp parallel
{
nthread = omp_get_num_threads();
}
return nthread;
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<bst_gpair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "TreeMaker: can only grow new tree";
const std::vector<unsigned> &root_index = fmat.info().root_index;
{
// setup position
position.resize(gpair.size());
if (root_index.size() == 0) {
std::fill(position.begin(), position.end(), 0);
} else {
for (size_t i = 0; i < position.size(); ++i) {
position[i] = root_index[i];
CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
<< "root index exceed setting";
}
}
// mark delete for the deleted datas
for (size_t i = 0; i < position.size(); ++i) {
if (gpair[i].hess < 0.0f) position[i] = ~position[i];
}
// mark subsample
if (param.subsample < 1.0f) {
std::bernoulli_distribution coin_flip(param.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position.size(); ++i) {
if (gpair[i].hess < 0.0f) continue;
if (!coin_flip(rnd)) position[i] = ~position[i];
}
}
}
{
// expand query
qexpand.reserve(256); qexpand.clear();
for (int i = 0; i < tree.param.num_roots; ++i) {
qexpand.push_back(i);
}
this->UpdateNode2WorkIndex(tree);
}
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (size_t i = 0; i < qexpand.size(); ++i) {
const int nid = qexpand[i];
if (!tree[nid].is_leaf()) {
newnodes.push_back(tree[nid].cleft());
newnodes.push_back(tree[nid].cright());
}
}
// use new nodes for qexpand
qexpand = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position[ridx] < 0) {
position[ridx] = ~nid;
} else {
position[ridx] = nid;
}
}
/*!
* \brief this is helper function uses column based data structure,
* reset the positions to the lastest one
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set rest of instances to default position
const RowSet &rowset = p_fmat->buffered_rowset();
// set default direct nodes to default
// for leaf nodes that are not fresh, mark then to ~nid,
// so that they are ignored in future statistics collection
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
const bst_uint ridx = rowset[i];
const int nid = this->DecodePosition(ridx);
if (tree[nid].is_leaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].cright() == -1) {
position[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].default_left()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* to CORRECT the positions of non-default directions that WAS set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const ColBatch& batch,
const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
for (size_t i = 0; i < batch.size; ++i) {
ColBatch::Inst col = batch[i];
const bst_uint fid = batch.col_index[i];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].is_leaf());
int pid = tree[nid].parent();
// go back to parent, correct those who are not default
if (!tree[nid].is_root() && tree[pid].split_index() == fid) {
if (fvalue < tree[pid].split_cond()) {
this->SetEncodePosition(ridx, tree[pid].cleft());
} else {
this->SetEncodePosition(ridx, tree[pid].cright());
}
}
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// step 1, classify the non-default data into right places
for (size_t i = 0; i < nodes.size(); ++i) {
const int nid = nodes[i];
if (!tree[nid].is_leaf()) {
fsplits.push_back(tree[nid].split_index());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
* \brief this is helper function uses column based data structure,
* update all positions into nondefault branch, if any, ignore the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator(fsplits);
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (size_t i = 0; i < batch.size; ++i) {
ColBatch::Inst col = batch[i];
const bst_uint fid = batch.col_index[i];
const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].is_leaf() && tree[nid].split_index() == fid) {
if (fvalue < tree[nid].split_cond()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<bst_gpair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
const MetaInfo &info = fmat.info();
thread_temp.resize(this->get_nthread());
p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
{
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats(param));
for (size_t i = 0; i < qexpand.size(); ++i) {
const unsigned nid = qexpand[i];
thread_temp[tid][nid].Clear();
}
}
const RowSet &rowset = fmat.buffered_rowset();
// setup position
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
const bst_uint ridx = rowset[i];
const int nid = position[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair, info, ridx);
}
}
// sum the per thread statistics together
for (size_t j = 0; j < qexpand.size(); ++j) {
const int nid = qexpand[j];
TStats &s = (*p_node_stats)[nid];
s.Clear();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
/*! \brief total sum of amount to be met */
double sum_total;
/*! \brief statistics used in the sketch */
double rmin, wmin;
/*! \brief last seen feature value */
bst_float last_fvalue;
/*! \brief current size of sketch */
double next_goal;
// pointer to the sketch to put things in
common::WXQuantileSketch<bst_float, bst_float> *sketch;
// initialize the space
inline void Init(unsigned max_size) {
next_goal = -1.0f;
rmin = wmin = 0.0f;
sketch->temp.Reserve(max_size + 1);
sketch->temp.size = 0;
}
/*!
* \brief push a new element to sketch
* \param fvalue feature value, comes in sorted ascending order
* \param w weight
* \param max_size
*/
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
if (next_goal == -1.0f) {
next_goal = 0.0f;
last_fvalue = fvalue;
wmin = w;
return;
}
if (last_fvalue != fvalue) {
double rmax = rmin + wmin;
if (rmax >= next_goal && sketch->temp.size != max_size) {
if (sketch->temp.size == 0 ||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
CHECK_LT(sketch->temp.size, max_size)
<< "invalid maximum size max_size=" << max_size
<< ", stemp.size" << sketch->temp.size;
++sketch->temp.size;
}
if (sketch->temp.size == max_size) {
next_goal = sum_total * 2.0f + 1e-5f;
} else {
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
}
} else {
if (rmax >= next_goal) {
LOG(TRACKER) << "INFO: rmax=" << rmax
<< ", sum_total=" << sum_total
<< ", naxt_goal=" << next_goal
<< ", size=" << sketch->temp.size;
}
}
rmin = rmax;
wmin = w;
last_fvalue = fvalue;
} else {
wmin += w;
}
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
double rmax = rmin + wmin;
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
CHECK_LE(sketch->temp.size, max_size)
<< "Finalize: invalid maximum size, max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
++sketch->temp.size;
}
sketch->PushTemp();
}
};
/*! \brief training parameter of tree grower */
TrainParam param;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand;
/*!
* \brief map active node to is working index offset in qexpand,
* can be -1, which means the node is node actively expanding
*/
std::vector<int> node2workindex;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
std::fill(node2workindex.begin(), node2workindex.end(), -1);
node2workindex.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand.size(); ++i) {
node2workindex[qexpand[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
explicit_strategy.h | /*
==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: Nelson $
* Date: $Date: 2009-09-18 $
* Revision: $Revision: 1.0 $
*
* ***********************************************************/
#if !defined(PFEM2_EXPLICIT_STRATEGY)
#define KRATOS_PFEM2_EXPLICIT_STRATEGY
/* System includes */
#include <string>
#include <iostream>
#include <algorithm>
/////////#define _OPENMP
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/explicit_strategy.h"
#include "solving_strategies/schemes/scheme.h"
//#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "pfem_2_application.h"
//#include "custom_utilities/neighbours_calculator.h"
//#include "custom_elements/2fluid_2d.h"
//#include "custom_elements/2fluid_3d.h"
namespace Kratos
{
template<
class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class PFEM2_Explicit_Strategy : public ExplicitStrategy<TSparseSpace,TDenseSpace,TLinearSolver>
{
public:
KRATOS_CLASS_POINTER_DEFINITION(PFEM2_Explicit_Strategy);
typedef SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
typedef ConditionsContainerType::iterator ConditionsContainerIterator;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::PropertiesType PropertiesType;
//typedef Element::Pointer ParticlePointer;
//typedef typename std::vector<ParticlePointer> ParticlePointerVector;
//typedef typename std::vector<ParticlePointer>::iterator ParticlePointerIterator;
//typedef WeakPointerVector<Element > ParticleWeakVector;
//typedef WeakPointerVector<Element >::iterator ParticleWeakIterator;
// Constructor: delegates all setup to the base ExplicitStrategy.
// The commented-out parameters document options of the generic explicit
// strategy that this PFEM2 variant does not use.
PFEM2_Explicit_Strategy(
ModelPart& model_part,
const int dimension,
// const int damp_type,
//const double damping_ratio,
// const bool virtual_mass,
// const double contact_stiffness_ratio,
// const double max_delta_time,
//const bool CalculateReactions,
//const bool ComputeFemFemContact,
const bool MoveMeshFlag
//typename TLinearSolver::Pointer pNewLinearSolver,
//typename TSchemeType::Pointer pScheme,
//typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver
)
: ExplicitStrategy<TSparseSpace,TDenseSpace,TLinearSolver>(model_part, dimension ,MoveMeshFlag)
{
}
virtual ~PFEM2_Explicit_Strategy () {}
//********************************************
//********************************************
/// Splits [0, number_of_rows) into number_of_threads contiguous slices.
/// partitions[t]..partitions[t+1] is the half-open range owned by thread t;
/// the last slice absorbs the remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = i * partition_size;
    partitions[number_of_threads] = number_of_rows;
}
//SPECIFIC FUNCTIONS FOR MY APPLICATION
/// Dispatches the reset routine matching the current FRACTIONAL_STEP value
/// before a solution step; throws for any unhandled step index.
void InitializeSolutionStep() override
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    const int fractional_step = CurrentProcessInfo[FRACTIONAL_STEP];
    if (fractional_step == 0)
        SetToZeroVariablesInViscousIterations(CurrentProcessInfo);
    else if (fractional_step == 3)
        SetToZeroVariablesInPresureIterations(CurrentProcessInfo);
    else if (fractional_step == 4)
        SetToZeroVariablesForVolumetricStrain(CurrentProcessInfo);
    else if (fractional_step == 5)
        SetToZeroVariablesForPressure(CurrentProcessInfo);
    else if (fractional_step == 6)
        SetToZeroVariablesInPresureViscousCorrection(CurrentProcessInfo);
    else if (fractional_step == 7)
        SetToZeroMassAndArea(CurrentProcessInfo);
    else if (fractional_step == 10)
        SetToZeroVariablesInPresureProjection(CurrentProcessInfo);
    else
    {
        KRATOS_THROW_ERROR(std::logic_error,"Unexpected value for FRACTIONAL_STEP index: ", fractional_step);
    }
    KRATOS_CATCH("")
}
/// Dispatches the update/normalization routine matching the current
/// FRACTIONAL_STEP value after a solution step; throws for any unhandled index.
void FinalizeSolutionStep() override
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    const int fractional_step = CurrentProcessInfo[FRACTIONAL_STEP];
    if (fractional_step == 0)
        UpdateLoopForViscousIterationsWithNormalization(CurrentProcessInfo);
    else if (fractional_step == 3)
        UpdateLoopForPressureIterationsWithNormalization(CurrentProcessInfo);
    else if (fractional_step == 4)
        UpdateLoopForVolumetricStrain(CurrentProcessInfo);
    else if (fractional_step == 5)
        UpdateLoopForPressure(CurrentProcessInfo);
    else if (fractional_step == 6)
        UpdateLoopForPressureViscousCorrection(CurrentProcessInfo);
    else if (fractional_step == 7)
        UpdateLoopForMassAndArea(CurrentProcessInfo);
    else if (fractional_step == 10)
        NormalizePressureProjection(CurrentProcessInfo);
    else
    {
        KRATOS_THROW_ERROR(std::logic_error,"Unexpected value for FRACTIONAL_STEP index: ", fractional_step);
    }
    KRATOS_CATCH("")
}
//VISCOUS ITERATIONS
/// Fractional step 0 reset: clears the nodal RHS vector and the lumped
/// NODAL_MASS so the viscous iteration can accumulate fresh contributions.
void SetToZeroVariablesInViscousIterations(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            noalias(inode->FastGetSolutionStepValue(RHS)) = ZeroVector(3);
            inode->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 0 update: explicit momentum update,
/// VELOCITY = RHS / NODAL_MASS (the assembled mass presumably already
/// includes the 1/delta_t factor, as noted in the pressure loop below).
/// NOTE(review): no guard against a zero NODAL_MASS — assumes every node
/// receives a mass contribution during assembly; confirm with the elements.
void UpdateLoopForViscousIterationsWithNormalization(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            const double nodal_mass = inode->FastGetSolutionStepValue(NODAL_MASS);
            noalias(inode->FastGetSolutionStepValue(VELOCITY)) = inode->FastGetSolutionStepValue(RHS) / nodal_mass;
        }
    }
    KRATOS_CATCH("")
}
//PRESSURE ITERATIONS
/// Fractional step 3 reset: prepares the nodal variables for the pressure
/// iterations.
/// - First non-linear iteration: ACCELERATION starts from zero; later
///   iterations seed it with PRESS_PROJ_NO_RO (second-order scheme).
/// - PRESS_PROJ, PRESS_PROJ_NO_RO, NODAL_AREA, NODAL_MASS and
///   VOLUMETRIC_STRAIN are cleared before re-assembly.
/// Fix: PRESS_PROJ was redundantly zeroed twice in a row; zeroing it once
/// is sufficient (the duplicate line was a copy-paste leftover).
void SetToZeroVariablesInPresureIterations(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes = r_model_part.Nodes();
    const int iteration_number = CurrentProcessInfo[NL_ITERATION_NUMBER];
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
        typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
        for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
        {
            if (iteration_number == 1)
                noalias(i->FastGetSolutionStepValue(ACCELERATION)) = ZeroVector(3);
            else// if (iteration_number==2)
                noalias(i->FastGetSolutionStepValue(ACCELERATION)) = i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO); //second order cos we are in the second (or higher) iteration.
            noalias(i->FastGetSolutionStepValue(PRESS_PROJ)) = ZeroVector(3);
            noalias(i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO)) = ZeroVector(3);
            i->FastGetSolutionStepValue(NODAL_AREA)=0.0;
            i->FastGetSolutionStepValue(NODAL_MASS)=0.0;
            i->FastGetSolutionStepValue(VOLUMETRIC_STRAIN)=0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 4 reset: clears NODAL_AREA and VOLUMETRIC_STRAIN so the
/// volumetric-strain pass can accumulate fresh nodal sums.
void SetToZeroVariablesForVolumetricStrain(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            inode->FastGetSolutionStepValue(VOLUMETRIC_STRAIN) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 5 reset: clears NODAL_AREA and PRESSURE so the pressure
/// pass can accumulate fresh nodal sums.
void SetToZeroVariablesForPressure(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            inode->FastGetSolutionStepValue(PRESSURE) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 3 update. Per node, in this order:
///   1. normalize PRESS_PROJ_NO_RO by NODAL_MASS and PRESS_PROJ by NODAL_AREA;
///   2. subtract the normalized projection from ACCELERATION;
///   3. add ACCELERATION to VELOCITY (per the inline note, the mass already
///      carries 1/delta_t, so this increment effectively equals a*delta_t);
///   4. store PRESSURE as PREVIOUS_ITERATION_PRESSURE for convergence checks.
/// NOTE(review): no guard against zero NODAL_MASS/NODAL_AREA — assumes every
/// node received assembly contributions; confirm.
void UpdateLoopForPressureIterationsWithNormalization(ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double factor = CurrentProcessInfo.GetValue(DELTA_TIME); //included in factor
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
//normalizing variables:
array_1d<double,3>& press_proj_no_ro = (i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO));
array_1d<double,3>& press_proj_stabilization = (i->FastGetSolutionStepValue(PRESS_PROJ));
double& mass = (i->FastGetSolutionStepValue(NODAL_MASS)); //this already includes 1/delta_t
double& area = (i->FastGetSolutionStepValue(NODAL_AREA));
press_proj_no_ro /= mass; //so this is already (pres_proj / mass) * delta_t;
press_proj_stabilization /= area;
//updating acceleration
array_1d<double,3>& acceleration = (i->FastGetSolutionStepValue(ACCELERATION));
noalias(acceleration) -= (press_proj_no_ro);
//updating variable
array_1d<double,3>& velocity = (i->FastGetSolutionStepValue(VELOCITY));
noalias(velocity) += (acceleration) ; //the nodal mass includes the delta_time, so this is actually acceleration*delta_t
i->FastGetSolutionStepValue(PREVIOUS_ITERATION_PRESSURE)=i->FastGetSolutionStepValue(PRESSURE);
}
}
KRATOS_CATCH("")
}
//to calculate only the pressure projection:
/// Fractional step 10 reset: clears PRESS_PROJ and NODAL_AREA so the
/// standalone pressure-projection pass can accumulate fresh values.
void SetToZeroVariablesInPresureProjection(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            noalias(inode->FastGetSolutionStepValue(PRESS_PROJ)) = ZeroVector(3);
            inode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 10 update: turns the assembled PRESS_PROJ sums into
/// nodal averages by dividing by the accumulated NODAL_AREA.
/// The commented-out lines record previously tried normalizations
/// (by nodal/vectorial mass, and of other scalar fields).
/// NOTE(review): no guard against a zero NODAL_AREA — assumes every node
/// received an area contribution during assembly; confirm.
void NormalizePressureProjection(ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double factor = CurrentProcessInfo.GetValue(DELTA_TIME); //included in factor
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
//normalizing variables:
//array_1d<double,3>& press_proj_no_ro = (i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO));
array_1d<double,3>& press_proj_stabilization = (i->FastGetSolutionStepValue(PRESS_PROJ));
//double& mass = (i->FastGetSolutionStepValue(NODAL_MASS));
//array_1d<double,3>& vectorial_mass=(i->FastGetSolutionStepValue(MASS));
double& area = (i->FastGetSolutionStepValue(NODAL_AREA));
//press_proj_no_ro /= mass;
//press_proj_no_ro(0) /= vectorial_mass(0);
//press_proj_no_ro(1) /= vectorial_mass(1);
//press_proj_no_ro(2) /= vectorial_mass(2)+1e-20;
press_proj_stabilization /= area;
//(i->FastGetSolutionStepValue(VOLUMETRIC_STRAIN))=i->FastGetSolutionStepValue(VOLUMETRIC_STRAIN)/area;
//(i->FastGetSolutionStepValue(PRESSURE))=i->FastGetSolutionStepValue(PRESSURE)/area;
//(i->FastGetSolutionStepValue(ELASTIC_PRESSURE))=i->FastGetSolutionStepValue(ELASTIC_PRESSURE)/area;
}
}
KRATOS_CATCH("")
}
/// Fractional step 4 update: converts the assembled VOLUMETRIC_STRAIN sums
/// into nodal averages by dividing by the accumulated NODAL_AREA.
void UpdateLoopForVolumetricStrain(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(VOLUMETRIC_STRAIN) /= inode->FastGetSolutionStepValue(NODAL_AREA);
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 5 update: converts the assembled PRESSURE sums into
/// nodal averages by dividing by the accumulated NODAL_AREA.
void UpdateLoopForPressure(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(PRESSURE) /= inode->FastGetSolutionStepValue(NODAL_AREA);
        }
    }
    KRATOS_CATCH("")
}
//PRESSURE VISCOUS CORRECTION
/// Fractional step 6 reset: clears NODAL_AREA and NODAL_MASS ahead of the
/// pressure viscous-correction assembly.
void SetToZeroVariablesInPresureViscousCorrection(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            inode->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 6 update: computes the viscous pressure correction and
/// the corrected total pressure per node.
/// NOTE(review): PRESSUREAUX is set to NODAL_AREA / NODAL_MASS here —
/// presumably NODAL_AREA holds the assembled correction numerator at this
/// stage rather than a geometric area; confirm against the element code.
/// No guard against a zero NODAL_MASS.
void UpdateLoopForPressureViscousCorrection(ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double factor = CurrentProcessInfo.GetValue(DELTA_TIME); //included in factor
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
// Correction term, then total pressure = correction + current pressure.
i->FastGetSolutionStepValue(PRESSUREAUX) = i->FastGetSolutionStepValue(NODAL_AREA)/i->FastGetSolutionStepValue(NODAL_MASS);
i->FastGetSolutionStepValue(EXTERNAL_PRESSURE) = i->FastGetSolutionStepValue(PRESSUREAUX) + i->FastGetSolutionStepValue(PRESSURE);
}
}
KRATOS_CATCH("")
}
/// Fractional step 7 reset: clears NODAL_MASS and NODAL_AREA.
void SetToZeroMassAndArea(ProcessInfo& CurrentProcessInfo)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& r_nodes = r_model_part.Nodes();
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, r_nodes.size(), node_partition);
    #pragma omp parallel for
    for (int thread = 0; thread < number_of_threads; thread++)
    {
        typename NodesArrayType::iterator node_begin = r_nodes.ptr_begin() + node_partition[thread];
        typename NodesArrayType::iterator node_end = r_nodes.ptr_begin() + node_partition[thread + 1];
        for (ModelPart::NodeIterator inode = node_begin; inode != node_end; ++inode)
        {
            inode->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
            inode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
        }
    }
    KRATOS_CATCH("")
}
/// Fractional step 7 update: intentionally a no-op — the assembled mass and
/// area are consumed as-is by later steps; nothing to normalize here.
void UpdateLoopForMassAndArea(ProcessInfo& CurrentProcessInfo)
{
KRATOS_TRY
KRATOS_CATCH("")
}
};
/// Explicit PFEM-2 strategy for the fluid (water) phase.
/// Mirrors PFEM2_Explicit_Strategy above, but updates WATER_VELOCITY /
/// WATER_PRESSURE and only handles fractional steps 0 (viscous) and
/// 3 (pressure); all other FRACTIONAL_STEP values throw.
template<
class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class Fluid_Phase_PFEM2_Explicit_Strategy : public ExplicitStrategy<TSparseSpace,TDenseSpace,TLinearSolver>
{
public:
KRATOS_CLASS_POINTER_DEFINITION(Fluid_Phase_PFEM2_Explicit_Strategy);
// Type aliases forwarded from the solving-strategy base class.
typedef SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
// Container aliases taken from ModelPart.
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
typedef ConditionsContainerType::iterator ConditionsContainerIterator;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::PropertiesType PropertiesType;
// Legacy particle aliases kept for reference; currently unused.
//typedef Element::Pointer ParticlePointer;
//typedef typename std::vector<ParticlePointer> ParticlePointerVector;
//typedef typename std::vector<ParticlePointer>::iterator ParticlePointerIterator;
//typedef WeakPointerVector<Element > ParticleWeakVector;
//typedef WeakPointerVector<Element >::iterator ParticleWeakIterator;
/// Constructor: forwards to the ExplicitStrategy base. The commented-out
/// parameters document options removed from an earlier API.
Fluid_Phase_PFEM2_Explicit_Strategy(
ModelPart& model_part,
const int dimension,
// const int damp_type,
//const double damping_ratio,
// const bool virtual_mass,
// const double contact_stiffness_ratio,
// const double max_delta_time,
//const bool CalculateReactions,
//const bool ComputeFemFemContact,
const bool MoveMeshFlag
//typename TLinearSolver::Pointer pNewLinearSolver,
//typename TSchemeType::Pointer pScheme,
//typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver
)
: ExplicitStrategy<TSparseSpace,TDenseSpace,TLinearSolver>(model_part, dimension ,MoveMeshFlag)
{
}
/// Destructor: nothing to release.
virtual ~Fluid_Phase_PFEM2_Explicit_Strategy () {}
//********************************************
//********************************************
/// Splits [0, number_of_rows) into number_of_threads contiguous slices;
/// the last slice absorbs the remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads+1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
//SPECIFIC FUNCTIONS FOR MY APPLICATION
/// Dispatches the reset routine for the current FRACTIONAL_STEP (0 or 3).
void InitializeSolutionStep() override
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
switch ( CurrentProcessInfo[FRACTIONAL_STEP] )
{
case 0:
{
SetToZeroVariablesInViscousIterations();
break;
}
case 3:
{
SetToZeroVariablesInPresureIterations();
break;
}
default:
{
KRATOS_THROW_ERROR(std::logic_error,"Unexpected value for FRACTIONAL_STEP index: ", CurrentProcessInfo[FRACTIONAL_STEP]);
}
}
KRATOS_CATCH("")
}
/// Dispatches the update routine for the current FRACTIONAL_STEP (0 or 3).
void FinalizeSolutionStep() override
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
switch ( CurrentProcessInfo[FRACTIONAL_STEP] )
{
case 0:
{
UpdateLoopForViscousIterationsWithNormalization();
break;
}
case 3:
{
UpdateLoopForPressureIterationsWithNormalization();
break;
}
default:
{
KRATOS_THROW_ERROR(std::logic_error,"Unexpected value for FRACTIONAL_STEP index: ", CurrentProcessInfo[FRACTIONAL_STEP]);
}
}
KRATOS_CATCH("")
}
//VISCOUS ITERATIONS
/// Fractional step 0 reset: clears nodal RHS and NODAL_MASS.
void SetToZeroVariablesInViscousIterations()
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
//ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double delta_t = CurrentProcessInfo[DELTA_TIME];
//const int iteration_number = CurrentProcessInfo[NL_ITERATION_NUMBER];
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
noalias(i->FastGetSolutionStepValue(RHS)) = ZeroVector(3);
i->FastGetSolutionStepValue(NODAL_MASS)=0.0;
}
}
KRATOS_CATCH("")
}
/// Fractional step 0 update: WATER_VELOCITY = RHS / NODAL_MASS; nodes with a
/// fixed water velocity keep their previous-step value instead.
/// NOTE(review): only WATER_VELOCITY_X is tested for fixity, but all three
/// components are overwritten — confirm this is intended.
void UpdateLoopForViscousIterationsWithNormalization()
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
//ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
NodesArrayType& pNodes = r_model_part.Nodes();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
array_1d<double,3>& rhs = (i->FastGetSolutionStepValue(RHS));
array_1d<double,3>& node_update_variable = (i->FastGetSolutionStepValue(WATER_VELOCITY));
noalias(node_update_variable) = rhs/(i->FastGetSolutionStepValue(NODAL_MASS)) ;
if (i->IsFixed(WATER_VELOCITY_X)==true)
noalias(node_update_variable) = (i->FastGetSolutionStepValue(WATER_VELOCITY,1));
}
}
KRATOS_CATCH("")
}
//PRESSURE ITERATIONS
/// Fractional step 3 reset: seeds ACCELERATION (zero on the first non-linear
/// iteration, PRESS_PROJ_NO_RO afterwards) and clears PRESS_PROJ_NO_RO and
/// NODAL_MASS before re-assembly.
void SetToZeroVariablesInPresureIterations()
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double delta_t = CurrentProcessInfo[DELTA_TIME];
const int iteration_number = CurrentProcessInfo[NL_ITERATION_NUMBER];
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
if (iteration_number == 1)
noalias(i->FastGetSolutionStepValue(ACCELERATION)) = ZeroVector(3);
else// if (iteration_number==2)
noalias(i->FastGetSolutionStepValue(ACCELERATION)) = i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO); //second order cos we are in the second (or higher) iteration.
//else if (iteration_number==3)
// noalias(inode->FastGetSolutionStepValue(ACCELERATION)) = - inode->FastGetSolutionStepValue(ACCELERATION) + inode->FastGetSolutionStepValue(PRESS_PROJ_NO_RO);
//noalias(i->FastGetSolutionStepValue(PRESS_PROJ)) = ZeroVector(3);
noalias(i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO)) = ZeroVector(3);
//noalias(in->GetSolutionStepValue(PRESSURE,1))=in->FastGetSolutionStepValue(PRESSURE);
//noalias(in->FastGetSolutionStepValue(PRESSURE)) = 0.0;
//i->FastGetSolutionStepValue(NODAL_AREA)=0.0;
i->FastGetSolutionStepValue(NODAL_MASS)=0.0;
//i->FastGetSolutionStepValue(MASS)=ZeroVector(3);
}
}
KRATOS_CATCH("")
}
/// Fractional step 3 update: normalizes PRESS_PROJ_NO_RO by NODAL_MASS,
/// corrects ACCELERATION, updates WATER_VELOCITY and stores WATER_PRESSURE
/// as PREVIOUS_ITERATION_PRESSURE. Fixed-velocity nodes are restored to
/// their previous-step water velocity.
/// NOTE(review): same single-component fixity check as above; no guard
/// against a zero NODAL_MASS.
void UpdateLoopForPressureIterationsWithNormalization()
{
KRATOS_TRY
ModelPart& r_model_part = BaseType::GetModelPart();
//ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
NodesArrayType& pNodes = r_model_part.Nodes();
//const double factor = CurrentProcessInfo.GetValue(DELTA_TIME); //included in factor
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
//normalizing variables:
array_1d<double,3>& press_proj_no_ro = (i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO));
//array_1d<double,3>& press_proj_stabilization = (i->FastGetSolutionStepValue(PRESS_PROJ));
double& mass = (i->FastGetSolutionStepValue(NODAL_MASS));
//array_1d<double,3>& vectorial_mass=(i->FastGetSolutionStepValue(MASS));
//double& area = (i->FastGetSolutionStepValue(NODAL_AREA));
press_proj_no_ro /= mass;
//press_proj_no_ro(0) /= vectorial_mass(0);
//press_proj_no_ro(1) /= vectorial_mass(1);
//press_proj_no_ro(2) /= vectorial_mass(2)+1e-20;
//press_proj_stabilization /= area;
//updating acceleration
array_1d<double,3>& acceleration = (i->FastGetSolutionStepValue(ACCELERATION));
noalias(acceleration) -= (press_proj_no_ro);
//updating variable
array_1d<double,3>& velocity = (i->FastGetSolutionStepValue(WATER_VELOCITY));
noalias(velocity) += (acceleration) ;
i->FastGetSolutionStepValue(PREVIOUS_ITERATION_PRESSURE)=i->FastGetSolutionStepValue(WATER_PRESSURE);
//i->FastGetSolutionStepValue(FRACT_VEL)=i->FastGetSolutionStepValue(VELOCITY);
if (i->IsFixed(WATER_VELOCITY_X)==true)
noalias(velocity) = (i->FastGetSolutionStepValue(WATER_VELOCITY,1));
}
}
KRATOS_CATCH("")
}
};
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_CENTRAL_DIFERENCES_STRATEGY */
|
hardswish_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: renzun@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include <arm_neon.h>
#include "hardswish_param.h"
/* Hardswish keeps no per-node state, so init/release/prerun are no-ops
 * that report success. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Computes hardswish: out = x * clamp(alpha * x + beta, 0, 1),
 * using NEON on groups of four floats plus a scalar tail loop.
 * The float-op order is deliberate; do not reorder. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
struct ir_tensor* output_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct hardswish_param* hardswish_param = ( struct hardswish_param* )ir_node->op.param_mem;
float alpha = hardswish_param->alpha;
float beta = hardswish_param->beta;
/* Piecewise thresholds: below `lower` the clamp saturates at 0 (output 0),
 * above `upper` it saturates at 1 (output equals the input). */
float lower = -beta / alpha;
float upper = (1.f / alpha) + lower;
/* NCHW layout (enforced in score()): dims[0]*dims[1] planes of
 * dims[2]*dims[3] elements each. */
int chan_num = (input_tensor->dims[0]) * (input_tensor->dims[1]);
int chan_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
float* pdata = ( float* )input_tensor->data;
float* pout_data = ( float* )output_tensor->data;
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _one = vdupq_n_f32(1.f);
int num_thread = exec_graph->num_thread;
#pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < chan_num; j++)
{
float* data = pdata + j * chan_size;
float* out_data = pout_data + j * chan_size;
/* chan_size & -4 rounds down to a multiple of 4: vectorized main loop. */
for (int i = 0; i < (chan_size & -4); i += 4)
{
float32x4_t _p = vld1q_f32(data + i);
float32x4_t _ans = vdupq_n_f32(beta);
_ans = vmlaq_n_f32(_ans, _p, alpha);  /* alpha * x + beta */
_ans = vmaxq_f32(_ans, _zero);        /* clamp below at 0 */
_ans = vminq_f32(_ans, _one);         /* clamp above at 1 */
_ans = vmulq_f32(_ans, _p);           /* multiply by x */
vst1q_f32(out_data + i, _ans);
}
/* Scalar tail (up to 3 trailing elements), same piecewise math. */
for (int i = chan_size & ~3; i < chan_size; i++)
{
if (data[i] < lower)
out_data[i] = 0.f;
else if (data[i] > upper)
out_data[i] = data[i];
else
out_data[i] = data[i] * (data[i] * alpha + beta);
}
}
return 0;
}
/* Claims the best score only for FP32 tensors in NCHW layout; any other
 * data type or layout is declined (score 0) so another kernel is picked. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_graph* graph = exec_node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(graph, exec_node->input_tensors[0]);
    int supported = (input->data_type == TENGINE_DT_FP32) && (input->layout == TENGINE_LAYOUT_NCHW);
    return supported ? OPS_SCORE_BEST : 0;
}
/* Operator dispatch table for the NEON hardswish kernel; reshape and
 * postrun hooks are not needed for this op. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Registers/unregisters this implementation for OP_HARDSWISH with the
 * runtime's builtin-op registry via the AUTO_* hooks below. */
static int reg_hardswish_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}
static int unreg_hardswish_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_hardswish_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_hardswish_hcl_ops);
|
omp_simd.c | //Variable examples of using simd directives
/* Copies the first n elements of b into a; the OpenMP simd directive asks
 * the compiler to vectorize the loop (a plain copy when OpenMP is off). */
void foo (int n, double *a, double* b)
{
    #pragma omp simd
    for (int idx = 0; idx < n; idx++)
        a[idx] = b[idx];
}
|
gen04.c | /*
Description:
This program implements my Genetic Algorithm method of solving the "N-Queens Problem"
Author:
Georgios Evangelou (1046900)
Year: 5
Parallel Programming in Machine Learning Problems
Electrical and Computer Engineering Department, University of Patras
System Specifications:
CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
GPU: Nvidia GTX 1050 (dual-fan, overclocked)
RAM: 8GB (dual-channel, @2666 MHz)
Version Notes:
Compiles/Runs/Debugs with: gcc gen04.c -o gen04 -lm -fopt-info -fopenmp -pg && time ./gen04 && gprof ./gen04
Inherits all features of previous version if not stated otherwise
Supports multi-threaded execution. Each thread has a separate set of genes to work on and all of them
try to find a solution
Worse execution time compared to single-threaded version, probably because of the call to rand() function
Profiler output for N=100 queens, GENES=600 genes and no optimizations:
Each sample counts as 0.01 seconds.
% cumulative self self total
time seconds seconds calls s/call s/call name
63.57 31.10 31.10 608981 0.00 0.00 UtilityFunction
30.54 46.05 14.94 627289 0.00 0.00 MutationFunction
3.09 47.56 1.51 194284123 0.00 0.00 RandomInteger
2.41 48.74 1.18 936 0.00 0.02 BreedGeneration
0.45 48.96 0.22 305403 0.00 0.00 CrossoverFunction
0.02 48.97 0.01 frame_dummy
0.00 48.97 0.00 993 0.00 0.03 CalculateAllUtilityValues
0.00 48.97 0.00 11 0.00 0.00 GeneInitialization
0.00 48.97 0.00 7 0.00 6.99 Solve
Without any optimizations and 12 threads reported:
For N=50 queens a solution is found after:
~0m00,532s and 178 generations, using 50 genes per thread(2130 summed generations)
For N=100 queens a solution is found after:
~0m02,345s and 385 generations, using 100 genes per thread(4643 summed generations)
For N=200 queens a solution is found after:
~0m25,780s and 96 generations, using 600 genes per thread(1130 summed generations)
*/
// ****************************************************************************************************************
//#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
//#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
//#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "stdbool.h"
#include "time.h"
#include "omp.h"
// ****************************************************************************************************************
#define N 200 //Number of queens
#define GENES 600 //Number of genes (must be even)
#define TARGET_THREADS 12 //Number of threads to ask
/**
 * Produces a pseudo-random integer in the inclusive range [mini,maxi].
 * Uses the shared rand() stream; callers seed it with srand().
 */
int RandomInteger(int mini, int maxi) {
    int span = maxi - mini;
    /* Scale rand() onto [0,span]; the expression keeps the original
     * evaluation order so the float rounding (and value) is identical. */
    int offset = (int) (span * ((float)rand())/((float)RAND_MAX) );
    return mini + offset; /* [mini, mini+span] == [mini, maxi] */
}
/**
 * Fills every gene with a random board: genes[g][q] is the random row
 * (in [0,N-1]) of the queen in column q of gene g.
 * rand() consumption order (gene-major, then column) matches the
 * original implementation.
 */
void GeneInitialization(int genes[GENES][N]) {
    for (int g = 0; g < GENES; g++)
        for (int q = 0; q < N; q++)
            genes[g][q] = RandomInteger(0, N-1);
}
/**
 * Prints an ASCII-art chessboard to stdout showing the first M queens of
 * the gene: posY[i] is the row of the queen in column i, and only columns
 * with index < M are drawn. Purely a display helper; no other side effects.
 */
void Map3(int posY[N], int M) {
    // Decorative header: '=' rule, then a centered " FITTEST GENE " banner.
    for (int i=0; i<N; i++) printf("==="); printf("===\n---");
    for (int i=0; i<N/3; i++) printf("---"); printf(" FITTEST GENE ");
    for (int i=0; i<N/3; i++) printf("---"); printf("---\n===");
    for (int i=0; i<N; i++) printf("==="); printf("\n");
    // Column index header row (1-based) framed by '-' rules.
    for (int i=0; i<N; i++) printf("---"); printf("---\n##|");
    for (int i=0; i<N; i++) printf("%2d ", i+1); printf("\n---");
    for (int i=0; i<N; i++) printf("---"); printf("\n");
    // Board body: 'Q' where one of the first M queens sits, '~' elsewhere.
    for (int y=0; y<N; y++) {
        printf("%2d| ", y+1);
        for (int x=0; x<N; x++) {
            bool flag = false;
            // Linear scan of the drawn queens per cell (O(M)); acceptable
            // since Map3 is only called once, after a solution is found.
            for (int i=0; i<M; i++) {
                if (i==x && posY[i]==y) {
                    flag = true;
                }
            }
            if (flag) printf("Q");
            else printf("~");
            printf(" ");
        }
        printf("\n");
    }
    // Closing rule.
    for (int i=0; i<N; i++) printf("---"); printf("---\n");
}
/**
 * Checks whether a queen in column x, row y is safe with respect to the
 * queens already placed in columns [0, x): returns false if any earlier
 * queen shares the row or a diagonal, true otherwise.
 *
 * Fix: the original also tested `oldQueen==x`, which is unreachable —
 * the loop guarantees oldQueen < currentQueen == x — so the dead
 * comparison has been removed. Behavior is unchanged.
 */
bool isSafeFromPrevious(int posY[N], int x, int y) {
    int currentQueen = x;
    for (int oldQueen=0; oldQueen<currentQueen; oldQueen++) {
        //printf(" Checking %d %d and %d %d \n",posX[q],posY[q],x,y);
        if (posY[oldQueen]==y) return false; //If the row is endangered
        else if (y==posY[oldQueen]+(currentQueen-oldQueen) || y==posY[oldQueen]-(currentQueen-oldQueen)) return false; //If a diagonal is endangered
    }
    return true;
}
/**
 * Counts pairwise collisions between queens: posY[i] is the row of the
 * queen in column i, so two queens collide when they share a row or a
 * diagonal (columns are distinct by construction). Lower is better;
 * 0 means a valid solution.
 *
 * Fix: per the profile in the file header this is the hottest function
 * (~64% of runtime). The original tested `old==crnt` on every inner
 * iteration, which is always false because the inner loop runs old < crnt;
 * the dead comparison has been removed. Behavior is unchanged.
 */
int UtilityFunction(int posY[N]) {
    int collisions = 0;
    for (int crnt=1; crnt<N; crnt++) {
        for (int old=0; old<crnt; old++) {
            if (posY[old]==posY[crnt]) collisions++; //If the row is endangered
            else if (posY[crnt]==posY[old]+(crnt-old) || posY[crnt]==posY[old]-(crnt-old)) collisions++; //If a diagonal is endangered
        }
    }
    return collisions;
}
/**
 * Crosses two parent genes in place: at every column where either parent
 * has two adjacent queens fewer than 2 rows apart, the parents' entries
 * for that column are exchanged. Later columns see the already-swapped
 * values, exactly as in the original.
 */
void CrossoverFunction(int gene1[N], int gene2[N]) {
    for (int pos=1; pos<N; pos++) {
        int step1 = abs(gene1[pos-1]-gene1[pos]);
        int step2 = abs(gene2[pos-1]-gene2[pos]);
        if (step1<2 || step2<2) {
            int held = gene1[pos];
            gene1[pos] = gene2[pos];
            gene2[pos] = held;
        }
    }
}
/**
 * Mutates a gene in place, in two steps:
 *  1. Repair: every duplicated row value is replaced by the smallest row
 *     value not yet present, so afterwards all N entries are distinct.
 *  2. Swap: two entries picked from either side of a random barrier are
 *     exchanged.
 *
 * Fix: the repair step was O(N^3) (pairwise duplicate scan plus a linear
 * search per replacement) and, per the profile in the file header, this
 * function took ~30% of the runtime. It now runs in O(N) while producing
 * exactly the same gene: duplicates are detected with a seen[] table
 * (after processing, the prefix gene[0..i] is always duplicate-free, so
 * "equals an earlier entry" is exactly "value already seen"), and the
 * smallest unused value is tracked with a cursor (inGene bits are only
 * ever set, never cleared, so the smallest free index is monotone).
 * The rand() consumption order is unchanged.
 */
void MutationFunction(int gene[N]) {
    // inGene[v] == 1 iff value v occurs in the gene or has already been
    // handed out as a replacement; entries never go back to 0.
    int inGene[N] = {0};
    for (int i=0; i<N; i++) {
        inGene[gene[i]] = 1;
    }
    // seenBefore[v] == 1 iff value v already occurs in gene[0..i-1].
    int seenBefore[N] = {0};
    // Smallest candidate replacement value; only ever moves forward.
    int nextFree = 0;
    for (int i=0; i<N; i++) {
        if (seenBefore[gene[i]]) {
            // Duplicate of an earlier entry: assign the smallest unused
            // value, exactly as the original linear scan did.
            while (inGene[nextFree]) nextFree++;
            gene[i] = nextFree;
            inGene[nextFree] = 1;
        }
        seenBefore[gene[i]] = 1;
    }
    // Performs the actual swapping
    int barrier = RandomInteger(1,N-3);        // [1, N-3]
    int swapA = RandomInteger(0,barrier);      // [0, barrier]
    int swapB = RandomInteger(barrier+1,N-1);  // [barrier+1, N-1]
    int temp = gene[swapA];
    gene[swapA] = gene[swapB];
    gene[swapB] = temp;
}
/**
 * Breeds the next generation in place. For each pair of offspring slots,
 * two low-collision parents are chosen by a noisy tournament over all
 * genes, copied, crossed over, and mutated. utilityValues must already
 * hold UtilityFunction() of each gene.
 */
void BreedGeneration(int genes[GENES][N], int utilityValues[GENES]) {
    // NOTE(review): `{-1}` sets only genesNew[0][0] to -1; the rest are
    // zero-initialized. Harmless, since every slot is overwritten below.
    // Also note this is a GENES*N*sizeof(int) stack array (~480KB at
    // N=200/GENES=600), allocated per calling thread — confirm thread
    // stack sizes allow this.
    int genesNew[GENES][N] = {-1};
    // For all pairs of genes to create
    for (int i=0; i<GENES-1; i+=2) {
        int index1 = -1, index2 = -1;
        float limit_value = INFINITY;
        float value1 = limit_value, value2 = limit_value;
        //...access all current genes and in a semi-stochastic way, pick two low-value parents
        // (a random 10..20 weight per collision keeps selection pressure soft).
        for (int j=0; j<GENES; j++) {
            float value = (float) (10 + RandomInteger(10,20)*utilityValues[j] );
            if (value<=value1) {
                // New best candidate: previous best demotes to second place.
                value2 = value1;
                index2 = index1;
                value1 = value;
                index1 = j;
            } else if (value<value2) {
                value2 = value;
                index2 = j;
            }
        }
        //...then copy the parents to the new array
        for (int k=0; k<N; k++) {
            genesNew[i][k] = genes[index1][k];
            genesNew[i+1][k] = genes[index2][k];
        }
        //...breed and mutate their children
        CrossoverFunction(genesNew[i], genesNew[i+1]);
        MutationFunction(genesNew[i]);
        MutationFunction(genesNew[i+1]);
    }
    // Finally copy the new genes into the old ones
    for (int i=0; i<GENES; i++) {
        for (int j=0; j<N; j++) {
            genes[i][j] = genesNew[i][j];
        }
    }
}
/**
 * Recomputes every gene's utility (collision count) into utilityValues
 * and returns the index of the fittest gene — the one with the lowest
 * collision count (ties keep the earliest index).
 */
unsigned CalculateAllUtilityValues(int genes[GENES][N], int utilityValues[GENES]) {
    int fittest = 0;
    for (int g=0; g<GENES; g++) {
        utilityValues[g] = UtilityFunction(genes[g]);
        if (utilityValues[g] < utilityValues[fittest]) {
            fittest = g;
        }
    }
    return fittest;
}
/**
 * Runs one thread's genetic algorithm until it finds a collision-free
 * gene or notices another thread already finished. Returns the number of
 * generations this thread ran. On exit, fittestGene receives this
 * thread's best gene, *whoHasFinished its thread id, and
 * *solversGenerations its generation count.
 *
 * NOTE(review): *whoHasFinished is written inside `omp critical` but read
 * in the loop condition without synchronization — an intentional
 * best-effort stop flag, but formally a data race; confirm this is
 * acceptable on the target compiler/platform.
 */
long int Solve(int fittestGene[N], unsigned threadID, int *whoHasFinished, unsigned *solversGenerations) {
    // Deterministic per-thread seed; rand() itself is shared process state,
    // which the header notes blame for the poor multithreaded scaling.
    srand(threadID);
    int genes[GENES][N];
    // `{1}` sets only utilityValues[0] to 1 (rest are 0) — just enough to
    // make the while condition true on entry; the array is fully
    // overwritten by the first CalculateAllUtilityValues call.
    int utilityValues[GENES] = {1};
    //Create a random set of genes
    GeneInitialization(genes);
    long int generation = 0;
    unsigned bestGene = 0;
    //While no solution is found (and no other thread has finished)
    while(utilityValues[bestGene]!=0 && *whoHasFinished<0) {
        generation++;
        //...for each repetition create the next generation of genes
        BreedGeneration(genes, utilityValues);
        //...and calculate all genes's utility values
        bestGene = CalculateAllUtilityValues(genes, utilityValues);
    }
    //After a correct gene has been found, store it in <fittestGene[N]> that is visible to main()
    // Last writer wins: threads stopped by the flag also pass through here
    // and overwrite these fields with their (possibly unsolved) state.
    #pragma omp critical
    {
        *whoHasFinished = threadID;
        *solversGenerations = generation;
        for (int i=0; i<N; i++) fittestGene[i] = genes[bestGene][i];
    }
    return generation;
}
/**
 * The main program: spawns TARGET_THREADS OpenMP threads, each running an
 * independent GA instance; the first thread to find a solution sets the
 * shared whoHasFinished flag, which stops the others.
 */
int main() {
    printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("This program implements my Genetic Algorithm method of solving the \"N-Queens Problem\".\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
    int fittestGene[N] = {0};
    // whoHasFinished: -1 while unsolved, else the winning thread's id.
    int numberOfThreads = 1, whoHasFinished = -1;
    unsigned solversGenerations = 0;
    long int totalGenerations = 0;
    printf("Queens set at: %d Genes set at: %d\n", N, GENES);
    printf("Now solving the problem. Please wait...\n");
    // reduction(+:totalGenerations): each thread's private copy receives
    // its own Solve() return value; the copies are summed on join.
    #pragma omp parallel num_threads(TARGET_THREADS) reduction(+:totalGenerations)
    {
        //Check how many threads were created
        #pragma omp single
        numberOfThreads = omp_get_num_threads();
        //Tell each thread to start searching for a solution
        totalGenerations = Solve(fittestGene, omp_get_thread_num(), &whoHasFinished, &solversGenerations);
    }
    printf("Algorithm completed. Number of threads used: %d Total generations: %ld\n", numberOfThreads, totalGenerations);
    printf("Solution found by thread #%d in #%u generations.\n", whoHasFinished, solversGenerations);
    printf("The solution found is:\n");
    //Map3(fittestGene, N); // uncomment to print the winning board
    return 0;
}
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program BT
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
//----------
// Class E:
//----------
// Complex number as a plain real/imag pair. The generated struct tag is
// preserved so other code referring to it keeps compiling.
struct anon_NAS_BT_c_87 {
    double real;
    double imag;
};
typedef struct anon_NAS_BT_c_87 dcomplex;
/*common /global/*/
int grid_points[3];
/*common /constants/*/
double tx1;
double tx2;
double tx3;
double ty1;
double ty2;
double ty3;
double tz1;
double tz2;
double tz3;
double dx1;
double dx2;
double dx3;
double dx4;
double dx5;
double dy1;
double dy2;
double dy3;
double dy4;
double dy5;
double dz1;
double dz2;
double dz3;
double dz4;
double dz5;
double dssp;
double dt;
double ce[5][13];
double dxmax;
double dymax;
double dzmax;
double xxcon1;
double xxcon2;
double xxcon3;
double xxcon4;
double xxcon5;
double dx1tx1;
double dx2tx1;
double dx3tx1;
double dx4tx1;
double dx5tx1;
double yycon1;
double yycon2;
double yycon3;
double yycon4;
double yycon5;
double dy1ty1;
double dy2ty1;
double dy3ty1;
double dy4ty1;
double dy5ty1;
double zzcon1;
double zzcon2;
double zzcon3;
double zzcon4;
double zzcon5;
double dz1tz1;
double dz2tz1;
double dz3tz1;
double dz4tz1;
double dz5tz1;
double dnxm1;
double dnym1;
double dnzm1;
double c1c2;
double c1c5;
double c3c4;
double c1345;
double conz1;
double c1;
double c2;
double c3;
double c4;
double c5;
double c4dssp;
double c5dssp;
double dtdssp;
double dttx1;
double dttx2;
double dtty1;
double dtty2;
double dttz1;
double dttz2;
double c2dttx1;
double c2dtty1;
double c2dttz1;
double comz1;
double comz4;
double comz5;
double comz6;
double c3c4tx3;
double c3c4ty3;
double c3c4tz3;
double c2iv;
double con43;
double con16;
// to improve cache performance, grid dimensions padded by 1
// for even number sizes only.
/*common /fields/*/
double us[24][25][25];
double vs[24][25][25];
double ws[24][25][25];
double qs[24][25][25];
double rho_i[24][25][25];
double square[24][25][25];
double forcing[24][25][25][5];
double u[24][25][25][5];
double rhs[24][25][25][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
/**
 * BT benchmark driver: sets up a 24^3 grid, warms up with one ADI step,
 * then times `niter` ADI iterations, verifies the result and prints the
 * performance summary. Exit status is 0 on successful verification,
 * 1 otherwise.
 */
int main(int argc, char *argv[]) {
    int i, niter, step;
    double navg, mflops, n3;
    double tmax;
    double t;
    double trecs[12];
    int verified;
    char Class;
    char *t_names[12];
    // NOTE(review): t, trecs and t_names are unused below — presumably
    // leftovers from the full benchmark's per-section timer reporting.
    printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - BT Benchmark\n\n");
    niter = 200;
    dt = 0.0008;
    grid_points[0] = 24;
    grid_points[1] = 24;
    grid_points[2] = 24;
    printf(" Size: %4dx%4dx%4d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
    printf("\n");
    // Refuse problem sizes larger than the statically-sized global arrays.
    if((grid_points[0] > 24) || (grid_points[1] > 24) || (grid_points[2] > 24)) {
        printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        return 0;
    }
    set_constants();
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(i = 1; i <= 11; i++) {
        timer_clear(i);
    }
    initialize();
    exact_rhs();
    //---------------------------------------------------------------------
    // do one time step to touch all code, and reinitialize
    //---------------------------------------------------------------------
    adi();
    initialize();
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(i = 1; i <= 11; i++) {
        timer_clear(i);
    }
    timer_start(1);
    /*************** Clava msgError **************
    Variables Access as passed arguments Can not be traced inside of function calls :
    printf#277{printf(" Time step %4d\n", step)}
    compute_rhs#280{compute_rhs()}
    add#2666{add()}
    ****************************************/
    for(step = 1; step <= niter; step++) {
        // Progress line on the first step and every 20th thereafter.
        if((step % 20) == 0 || step == 1) {
            printf(" Time step %4d\n", step);
        }
        adi();
    }
    timer_stop(1);
    tmax = timer_read(1);
    verify(niter, &Class, &verified);
    // Performance model: MFLOP/s from the grid size polynomial below.
    n3 = 1.0 * grid_points[0] * grid_points[1] * grid_points[2];
    navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
    if(tmax != 0.0) {
        mflops = 1.0e-6 * (double) niter * (3478.8 * n3 - 17655.7 * (navg * navg) + 28023.7 * navg) / tmax;
    }
    else {
        mflops = 0.0;
    }
    print_results("BT", Class, grid_points[0], grid_points[1], grid_points[2], niter, tmax, mflops, " floating point", verified);
    int exitValue = verified ? 0 : 1;
    return exitValue;
}
// One ADI (alternating-direction implicit) time step: build the RHS,
// sweep the three factored directions in order, then apply the update to u.
void adi() {
    compute_rhs();
    x_solve();
    y_solve();
    z_solve();
    add();
}
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add() {
    // u += rhs over all interior grid points; boundary planes untouched.
    int kk, jj, ii, mm;
    #pragma omp parallel for default(shared) private(kk, jj, ii, mm) firstprivate(grid_points, rhs)
    for(kk = 1; kk <= grid_points[2] - 2; kk++) {
        for(jj = 1; jj <= grid_points[1] - 2; jj++) {
            for(ii = 1; ii <= grid_points[0] - 2; ii++) {
                for(mm = 0; mm < 5; mm++) {
                    u[kk][jj][ii][mm] += rhs[kk][jj][ii][mm];
                }
            }
        }
    }
}
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
// Writes into rms[0..4] the RMS error of each of the five solution
// components over the whole grid, normalized per direction by the
// interior extent.
void error_norm(double rms[5]) {
    int i, j, k, m, d;
    double xi;
    double eta;
    double zeta;
    double u_exact[5];
    double add; // per-point difference; shadows the global add() function in this scope
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }
    // Sum of squared errors, parallel over k with an array-section
    // reduction on rms[0..4].
    #pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
    for(k = 0; k <= grid_points[2] - 1; k++) {
        zeta = (double) (k) * dnzm1;
        // #pragma omp parallel for default(shared) private(j, i, m, eta, xi, add) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
        for(j = 0; j <= grid_points[1] - 1; j++) {
            eta = (double) (j) * dnym1;
            // #pragma omp parallel for default(shared) private(i, m, xi, add) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
            for(i = 0; i <= grid_points[0] - 1; i++) {
                xi = (double) (i) * dnxm1;
                exact_solution(xi, eta, zeta, u_exact);
                /*************** Clava msgError **************
                Loop Iteration number is too low
                ****************************************/
                for(m = 0; m < 5; m++) {
                    add = u[k][j][i][m] - u_exact[m];
                    rms[m] = rms[m] + add * add;
                }
            }
        }
    }
    // Normalize by the interior point count in each direction, then sqrt.
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 5; m++) {
        /*************** Clava msgError **************
        Loop Iteration number is too low
        ****************************************/
        for(d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double) (grid_points[d] - 2);
        }
        rms[m] = sqrt(rms[m]);
    }
}
// Writes into rms[0..4] the RMS norm of each of the five right-hand-side
// components over the interior grid points, normalized per direction by
// the interior extent.
void rhs_norm(double rms[5]) {
    int i, j, k, d, m;
    double add; // current rhs component; shadows the global add() function in this scope
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }
    // Sum of squares over the interior, parallel over k with an
    // array-section reduction on rms[0..4].
    #pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(grid_points, rhs) reduction(+ : rms[:5])
    for(k = 1; k <= grid_points[2] - 2; k++) {
        // #pragma omp parallel for default(shared) private(j, i, m, add) firstprivate(k, grid_points, rhs) reduction(+ : rms[:5])
        for(j = 1; j <= grid_points[1] - 2; j++) {
            // #pragma omp parallel for default(shared) private(i, m, add) firstprivate(k, j, grid_points, rhs) reduction(+ : rms[:5])
            for(i = 1; i <= grid_points[0] - 2; i++) {
                /*************** Clava msgError **************
                Loop Iteration number is too low
                ****************************************/
                for(m = 0; m < 5; m++) {
                    add = rhs[k][j][i][m];
                    rms[m] = rms[m] + add * add;
                }
            }
        }
    }
    // Normalize by the interior point count in each direction, then sqrt.
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(m = 0; m < 5; m++) {
        /*************** Clava msgError **************
        Loop Iteration number is too low
        ****************************************/
        for(d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double) (grid_points[d] - 2);
        }
        rms[m] = sqrt(rms[m]);
    }
}
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs() {
double dtemp[5];
double xi;
double eta;
double zeta;
double dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
double cuf[25];
double q[25];
double ue[25][5];
double buf[25][5];
//---------------------------------------------------------------------
// initialize
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = 0.0;
}
}
}
}
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(j, i, m, eta, xi, dtpp, im1, ip1) firstprivate(dnym1, dnxm1, zeta, tx2, k, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) (j) * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi, dtpp) firstprivate(dnxm1, zeta, eta, grid_points, ce, dtemp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);
}
// #pragma omp parallel for default(shared) private(i, im1, ip1) firstprivate(tx2, k, j, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, grid_points, ue, q, buf, cuf)
for(i = 1; i <= grid_points[0] - 2; i++) {
im1 = i - 1;
ip1 = i + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * ((ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
i = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
// #pragma omp parallel for default(shared) private(i, m) firstprivate(dssp, k, j, grid_points, ue)
for(i = 3; i <= grid_points[0] - 4; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = grid_points[0] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
i = grid_points[0] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
}
}
}
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(i, j, m, xi, eta, dtpp, jm1, jp1) firstprivate(dnxm1, dnym1, zeta, ty2, k, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) (i) * dnxm1;
// #pragma omp parallel for default(shared) private(j, m, eta, dtpp) firstprivate(dnym1, zeta, xi, grid_points, ce, dtemp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);
}
// #pragma omp parallel for default(shared) private(j, jm1, jp1) firstprivate(ty2, k, i, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, grid_points, ue, buf, q, cuf)
for(j = 1; j <= grid_points[1] - 2; j++) {
jm1 = j - 1;
jp1 = j + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * ((ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
j = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
// #pragma omp parallel for default(shared) private(j, m) firstprivate(dssp, k, i, grid_points, ue)
for(j = 3; j <= grid_points[1] - 4; j++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = grid_points[1] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
j = grid_points[1] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
}
}
}
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) (j) * dnym1;
// #pragma omp parallel for default(shared) private(i, k, m, xi, zeta, dtpp, km1, kp1) firstprivate(dnxm1, dnzm1, eta, tz2, j, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) (i) * dnxm1;
// #pragma omp parallel for default(shared) private(k, m, zeta, dtpp) firstprivate(dnzm1, eta, xi, grid_points, ce, dtemp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);
}
// #pragma omp parallel for default(shared) private(k, km1, kp1) firstprivate(tz2, j, i, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, grid_points, ue, buf, q, cuf)
for(k = 1; k <= grid_points[2] - 2; k++) {
km1 = k - 1;
kp1 = k + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * ((ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
k = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
// #pragma omp parallel for default(shared) private(k, m) firstprivate(dssp, j, i, grid_points, ue)
for(k = 3; k <= grid_points[2] - 4; k++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = grid_points[2] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
k = grid_points[2] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
}
}
}
//---------------------------------------------------------------------
// now change the sign of the forcing function,
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function returns the exact solution at point xi, eta, zeta
//---------------------------------------------------------------------
// Evaluates, for each of the 5 flow variables m, the benchmark's
// analytic solution polynomial: a constant term plus one nested
// (Horner-form) cubic in each of the three coordinates, with the
// coefficients taken from the global table ce (set in set_constants).
void exact_solution(double xi, double eta, double zeta, double dtemp[5]) {
int m;
for(m = 0; m < 5; m++) {
// cubic contribution of each coordinate, evaluated in Horner form
double xi_term = xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10])));
double eta_term = eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11])));
double zeta_term = zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12])));
dtemp[m] = ce[m][0] + xi_term + eta_term + zeta_term;
}
}
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
// Reads grid_points, the mesh spacings dnxm1/dnym1/dnzm1 and (through
// exact_solution) the coefficient table ce; overwrites the entire
// field array u. In the parallel regions, firstprivate(temp) and
// firstprivate(Pface) give every thread its own scratch copy of the
// small work buffers, so the concurrent writes below do not race.
void initialize() {
int i, j, k, m, ix, iy, iz;
double xi;
double eta;
double zeta;
// Pface[f][d][m]: exact solution on face f (0 = low, 1 = high) of
// direction d (0 = xi, 1 = eta, 2 = zeta) for variable m.
double Pface[2][3][5];
double Pxi;
double Peta;
double Pzeta;
double temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = 1.0;
}
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, ix, iy, iz, m, zeta, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, Pface)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(j, i, ix, iy, iz, m, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, Pface)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
// #pragma omp parallel for default(shared) private(i, ix, iy, iz, m, xi, Pxi, Peta, Pzeta) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, Pface)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
// evaluate the exact solution on the two faces of each direction
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(ix = 0; ix < 2; ix++) {
exact_solution((double) ix, eta, zeta, Pface[ix][0]);
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iy = 0; iy < 2; iy++) {
exact_solution(xi, (double) iy, zeta, Pface[iy][1]);
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double) iz, Pface[iz][2]);
}
// blend the six face values with the standard tri-linear
// transfinite interpolation formula (inclusion-exclusion of
// the three one-dimensional interpolants)
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[k][j][i][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
i = 0;
xi = 0.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
i = grid_points[0] - 1;
xi = 1.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
j = 0;
eta = 0.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
j = grid_points[1] - 1;
eta = 1.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
k = 0;
zeta = 0.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
k = grid_points[2] - 1;
zeta = 1.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
}
//---------------------------------------------------------------------
// Prepares the boundary rows of a block-tridiagonal system: zeroes all
// three 5x5 blocks (sub-, main- and super-diagonal) of the first (0)
// and last (size) entries of lhs, then writes 1.0 on the diagonal of
// the two main-diagonal blocks so those rows act as identity equations.
//---------------------------------------------------------------------
void lhsinit(double lhs[][3][5][5], int size) {
int d, m, n;
//---------------------------------------------------------------------
// zero every block slot (d = 0,1,2) of the two boundary entries
//---------------------------------------------------------------------
for(d = 0; d < 3; d++) {
for(n = 0; n < 5; n++) {
for(m = 0; m < 5; m++) {
lhs[0][d][n][m] = 0.0;
lhs[size][d][n][m] = 0.0;
}
}
}
//---------------------------------------------------------------------
// unit diagonal on the main-diagonal block (slot 1) of both ends
//---------------------------------------------------------------------
for(m = 0; m < 5; m++) {
lhs[0][1][m][m] = 1.0;
lhs[size][1][m][m] = 1.0;
}
}
//---------------------------------------------------------------------
// Computes the right hand side for one time step:
//   rhs = dt * (forcing + central-difference flux derivatives in the
//               xi, eta and zeta directions
//             + fourth-order artificial dissipation)
// Reads u, forcing and the constants from set_constants(); writes the
// auxiliary fields rho_i, us, vs, ws, square, qs and the array rhs.
//---------------------------------------------------------------------
void compute_rhs() {
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
//---------------------------------------------------------------------
// compute the reciprocal of density, and the kinetic energy,
// and the speed of sound.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, rho_inv) firstprivate(grid_points, u)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, rho_inv) firstprivate(k, grid_points, u)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, rho_inv) firstprivate(k, j, grid_points, u)
for(i = 0; i <= grid_points[0] - 1; i++) {
// u[..][0] is density; initialize() guarantees it is nonzero
// everywhere, including unused corners, so 1/u is always safe
rho_inv = 1.0 / u[k][j][i][0];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][j][i][1] * rho_inv;
vs[k][j][i] = u[k][j][i][2] * rho_inv;
ws[k][j][i] = u[k][j][i][3] * rho_inv;
square[k][j][i] = 0.5 * (u[k][j][i][1] * u[k][j][i][1] + u[k][j][i][2] * u[k][j][i][2] + u[k][j][i][3] * u[k][j][i][3]) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
}
}
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points, forcing)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points, forcing)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = forcing[k][j][i][m];
}
}
}
}
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
// the i+/-1 stencil below touches only interior points, so the k-loop
// can run in parallel; the dissipation passes further down reuse the
// same parallel region
#pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, grid_points, us, u, square, vs, ws, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, uijk, up1, um1) firstprivate(k, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, grid_points, us, u, square, vs, ws, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, uijk, up1, um1) firstprivate(k, j, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, grid_points, us, u, square, vs, ws, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
uijk = us[k][j][i];
up1 = us[k][j][i + 1];
um1 = us[k][j][i - 1];
rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] + u[k][j][i - 1][0]) - tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] + u[k][j][i - 1][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[k][j][i + 1][1] * up1 - u[k][j][i - 1][1] * um1 + (u[k][j][i + 1][4] - square[k][j][i + 1] - u[k][j][i - 1][4] + square[k][j][i - 1]) * c2);
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] + u[k][j][i - 1][2]) + xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] + vs[k][j][i - 1]) - tx2 * (u[k][j][i + 1][2] * up1 - u[k][j][i - 1][2] * um1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] + u[k][j][i - 1][3]) + xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] + ws[k][j][i - 1]) - tx2 * (u[k][j][i + 1][3] * up1 - u[k][j][i - 1][3] * um1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] + u[k][j][i - 1][4]) + xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] + qs[k][j][i - 1]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j][i - 1][4] * rho_i[k][j][i - 1]) - tx2 * ((c1 * u[k][j][i + 1][4] - c2 * square[k][j][i + 1]) * up1 - (c1 * u[k][j][i - 1][4] - c2 * square[k][j][i - 1]) * um1);
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
// boundary-adjacent points i = 1 and i = 2 use one-sided stencils
// #pragma omp parallel for default(shared) private(j, m, i) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
i = 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
i = 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
// interior points use the full five-point dissipation stencil
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 3; i <= grid_points[0] - 4; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
}
// mirrored one-sided stencils at the high-i boundary
// #pragma omp parallel for default(shared) private(j, m, i) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
i = grid_points[0] - 3;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m]);
}
i = grid_points[0] - 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4. * u[k][j][i - 1][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, grid_points, vs, u, us, square, ws, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, vijk, vp1, vm1) firstprivate(k, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, grid_points, vs, u, us, square, ws, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, vijk, vp1, vm1) firstprivate(k, j, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, grid_points, vs, u, us, square, ws, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
vijk = vs[k][j][i];
vp1 = vs[k][j + 1][i];
vm1 = vs[k][j - 1][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] + u[k][j - 1][i][0]) - ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] + u[k][j - 1][i][1]) + yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] + us[k][j - 1][i]) - ty2 * (u[k][j + 1][i][1] * vp1 - u[k][j - 1][i][1] * vm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] + u[k][j - 1][i][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[k][j + 1][i][2] * vp1 - u[k][j - 1][i][2] * vm1 + (u[k][j + 1][i][4] - square[k][j + 1][i] - u[k][j - 1][i][4] + square[k][j - 1][i]) * c2);
rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] + u[k][j - 1][i][3]) + yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] + ws[k][j - 1][i]) - ty2 * (u[k][j + 1][i][3] * vp1 - u[k][j - 1][i][3] * vm1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] + u[k][j - 1][i][4]) + yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] + qs[k][j - 1][i]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j - 1][i][4] * rho_i[k][j - 1][i]) - ty2 * ((c1 * u[k][j + 1][i][4] - c2 * square[k][j + 1][i]) * vp1 - (c1 * u[k][j - 1][i][4] - c2 * square[k][j - 1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
// one-sided stencils at j = 1 and j = 2, full stencil in the interior,
// mirrored one-sided stencils at the high-j boundary (same pattern as
// the xi direction above)
j = 1;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
j = 2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 3; j <= grid_points[1] - 4; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
}
j = grid_points[1] - 3;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m]);
}
}
j = grid_points[1] - 2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4. * u[k][j - 1][i][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, wijk, wp1, wm1) firstprivate(k, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, wijk, wp1, wm1) firstprivate(k, j, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
wijk = ws[k][j][i];
wp1 = ws[k + 1][j][i];
wm1 = ws[k - 1][j][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] + u[k - 1][j][i][0]) - tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] + u[k - 1][j][i][1]) + zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] + us[k - 1][j][i]) - tz2 * (u[k + 1][j][i][1] * wp1 - u[k - 1][j][i][1] * wm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] + u[k - 1][j][i][2]) + zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] + vs[k - 1][j][i]) - tz2 * (u[k + 1][j][i][2] * wp1 - u[k - 1][j][i][2] * wm1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] + u[k - 1][j][i][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[k + 1][j][i][3] * wp1 - u[k - 1][j][i][3] * wm1 + (u[k + 1][j][i][4] - square[k + 1][j][i] - u[k - 1][j][i][4] + square[k - 1][j][i]) * c2);
rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] + u[k - 1][j][i][4]) + zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] + qs[k - 1][j][i]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k - 1][j][i][4] * rho_i[k - 1][j][i]) - tz2 * ((c1 * u[k + 1][j][i][4] - c2 * square[k + 1][j][i]) * wp1 - (c1 * u[k - 1][j][i][4] - c2 * square[k - 1][j][i]) * wm1);
}
}
}
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
// same boundary / interior stencil pattern as the other directions,
// but here each special-case k value gets its own parallel region
// because the k stencil spans planes
k = 1;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
k = 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dssp, grid_points, u)
for(k = 3; k <= grid_points[2] - 4; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
}
k = grid_points[2] - 3;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m]);
}
}
}
k = grid_points[2] - 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4. * u[k - 1][j][i][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// finally, scale the whole right hand side by the time step dt
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dt, grid_points)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dt, grid_points)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dt, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
}
}
}
}
}
void set_constants() {
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
dnxm1 = 1.0 / (double) (grid_points[0] - 1);
dnym1 = 1.0 / (double) (grid_points[1] - 1);
dnzm1 = 1.0 / (double) (grid_points[2] - 1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0 - c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = ((dx3) > (dx4) ? (dx3) : (dx4));
dymax = ((dy2) > (dy4) ? (dy2) : (dy4));
dzmax = ((dz2) > (dz3) ? (dz2) : (dz3));
dssp = 0.25 * ((dx1) > (((dy1) > (dz1) ? (dy1) : (dz1))) ? (dx1) : (((dy1) > (dz1) ? (dy1) : (dz1))));
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt * tx1;
dttx2 = dt * tx2;
dtty1 = dt * ty1;
dtty2 = dt * ty2;
dttz1 = dt * tz1;
dttz2 = dt * tz2;
c2dttx1 = 2.0 * dttx1;
c2dtty1 = 2.0 * dtty1;
c2dttz1 = 2.0 * dttz1;
dtdssp = dt * dssp;
comz1 = dtdssp;
comz4 = 4.0 * dtdssp;
comz5 = 5.0 * dtdssp;
comz6 = 6.0 * dtdssp;
c3c4tx3 = c3c4 * tx3;
c3c4ty3 = c3c4 * ty3;
c3c4tz3 = c3c4 * tz3;
dx1tx1 = dx1 * tx1;
dx2tx1 = dx2 * tx1;
dx3tx1 = dx3 * tx1;
dx4tx1 = dx4 * tx1;
dx5tx1 = dx5 * tx1;
dy1ty1 = dy1 * ty1;
dy2ty1 = dy2 * ty1;
dy3ty1 = dy3 * ty1;
dy4ty1 = dy4 * ty1;
dy5ty1 = dy5 * ty1;
dz1tz1 = dz1 * tz1;
dz2tz1 = dz2 * tz1;
dz3tz1 = dz3 * tz1;
dz4tz1 = dz4 * tz1;
dz5tz1 = dz5 * tz1;
c2iv = 2.5;
con43 = 4.0 / 3.0;
con16 = 1.0 / 6.0;
xxcon1 = c3c4tx3 * con43 * tx3;
xxcon2 = c3c4tx3 * tx3;
xxcon3 = c3c4tx3 * conz1 * tx3;
xxcon4 = c3c4tx3 * con16 * tx3;
xxcon5 = c3c4tx3 * c1c5 * tx3;
yycon1 = c3c4ty3 * con43 * ty3;
yycon2 = c3c4ty3 * ty3;
yycon3 = c3c4ty3 * conz1 * ty3;
yycon4 = c3c4ty3 * con16 * ty3;
yycon5 = c3c4ty3 * c1c5 * ty3;
zzcon1 = c3c4tz3 * con43 * tz3;
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
//---------------------------------------------------------------------
// subtracts bvec=bvec - ablock*avec
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// subtracts bvec = bvec - ablock*avec
//
// Storage convention (matches the rest of this solver): the FIRST
// index of ablock runs over the entries of avec (the "column"), the
// SECOND index selects the output row, i.e.
//     bvec[row] -= sum_col ablock[col][row] * avec[col]
// The inner loop subtracts terms left-to-right, so the floating-point
// result is bit-identical to the fully unrolled form.
//---------------------------------------------------------------------
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
  int row, col;
  for (row = 0; row < 5; row++) {
    for (col = 0; col < 5; col++) {
      bvec[row] = bvec[row] - ablock[col][row] * avec[col];
    }
  }
}
//---------------------------------------------------------------------
// subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
//
// Same [col][row] storage convention as matvec_sub; the update is
//     cblock[j][i] -= sum_k ablock[k][i] * bblock[j][k]
// The k terms are subtracted left-to-right, so each element gets the
// exact same sequence of floating-point operations as the original
// fully unrolled version.
//---------------------------------------------------------------------
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) {
  int i, j, k;
  for (j = 0; j < 5; j++) {
    for (i = 0; i < 5; i++) {
      for (k = 0; k < 5; k++) {
        cblock[j][i] = cblock[j][i] - ablock[k][i] * bblock[j][k];
      }
    }
  }
}
//---------------------------------------------------------------------
// Gauss-Jordan elimination (no pivot search) on the 5x5 block "lhs",
// applying every row operation simultaneously to the 5x5 block "c"
// and the right-hand-side vector "r".  On exit lhs is reduced to the
// identity, c holds lhs^{-1}*c and r holds lhs^{-1}*r.
//
// Storage is [col][row] as elsewhere in this solver: lhs[j][q] is
// row q, column j.  NOTE(review): no pivoting is performed, so a
// (near-)zero diagonal entry lhs[p][p] would blow up -- the solver
// relies on the block matrices being diagonally dominant.
//
// The loop nest below performs exactly the same per-element
// floating-point operations, in the same order, as the original
// fully unrolled code, so results are bit-identical.
//---------------------------------------------------------------------
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
  double pivot, coeff;
  int p, q, j;
  for (p = 0; p < 5; p++) {
    // scale pivot row p so its diagonal entry becomes 1
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++) {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    for (j = 0; j < 5; j++) {
      c[j][p] = c[j][p] * pivot;
    }
    r[p] = r[p] * pivot;
    // eliminate column p from every other row (rows above AND below,
    // i.e. full Gauss-Jordan rather than just forward elimination)
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[p][q];
      for (j = p + 1; j < 5; j++) {
        lhs[j][q] = lhs[j][q] - coeff * lhs[j][p];
      }
      for (j = 0; j < 5; j++) {
        c[j][q] = c[j][q] - coeff * c[j][p];
      }
      r[q] = r[q] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// Gauss-Jordan elimination (no pivot search) on the 5x5 block "lhs",
// applying every row operation to the right-hand-side vector "r".
// On exit r holds lhs^{-1}*r.  This is binvcrhs without the extra
// 5x5 block "c"; see that routine for the [col][row] storage
// convention and the no-pivoting caveat.
//
// The loop nest performs the same per-element floating-point
// operations, in the same order, as the original unrolled code, so
// results are bit-identical.
//---------------------------------------------------------------------
void binvrhs(double lhs[5][5], double r[5]) {
  double pivot, coeff;
  int p, q, j;
  for (p = 0; p < 5; p++) {
    // normalize pivot row p
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++) {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    r[p] = r[p] * pivot;
    // annihilate column p in all other rows
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[p][q];
      for (j = p + 1; j < 5; j++) {
        lhs[j][q] = lhs[j][q] - coeff * lhs[j][p];
      }
      r[q] = r[q] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// verification routine
//
// Compares the computed RMS norms of the residual (xcr) and of the
// solution error (xce) against hard-coded reference values for the
// standard NPB BT problem classes (S, W, A, B, C, D, E), identified
// by grid size and time-step count.  Outputs:
//   *Class    - the matched problem-class letter, or 'U' (unknown)
//   *verified - 1 if all norms are within epsilon of the reference
//               (relative error) and dt matches the reference dt,
//               0 otherwise
// Relies on file-scope state: grid_points[], dt, and the helpers
// error_norm(), compute_rhs(), rhs_norm() defined elsewhere in this
// file.
//---------------------------------------------------------------------
void verify(int no_time_steps, char *Class, int *verified) {
  double xcrref[5];   // reference residual norms for the matched class
  double xceref[5];   // reference solution-error norms
  double xcrdif[5];   // relative differences, residual
  double xcedif[5];   // relative differences, solution error
  double epsilon;
  double xce[5];      // computed solution-error norms
  double xcr[5];      // computed residual norms
  double dtref = 0.0; // reference time step for the matched class
  int m;
  //---------------------------------------------------------------------
  // tolerance level
  //---------------------------------------------------------------------
  epsilon = 1.0e-08;
  //---------------------------------------------------------------------
  // compute the error norm and the residual norm, and exit if not printing
  //---------------------------------------------------------------------
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  // residual norms are accumulated over one RHS evaluation; normalize
  // by dt so they are comparable with the reference values
  for(m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }
  // start from "unknown class / verified"; the checks below may demote
  *Class = 'U';
  *verified = 1;
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  // defaults of 1.0 keep the relative-difference computation well
  // defined even when no reference class matches
  for(m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
  //---------------------------------------------------------------------
  // reference data for 12X12X12 grids after 60 time steps, with DT = 1.0e-02
  //---------------------------------------------------------------------
  if((grid_points[0] == 12) && (grid_points[1] == 12) && (grid_points[2] == 12) && (no_time_steps == 60)) {
    *Class = 'S';
    dtref = 1.0e-2;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;
    //---------------------------------------------------------------------
    // reference data for 24X24X24 grids after 200 time steps,
    // with DT = 0.8e-3
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 24) && (grid_points[1] == 24) && (grid_points[2] == 24) && (no_time_steps == 200)) {
    *Class = 'W';
    dtref = 0.8e-3;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;
    //---------------------------------------------------------------------
    // reference data for 64X64X64 grids after 200 time steps,
    // with DT = 0.8e-3
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 64) && (grid_points[1] == 64) && (grid_points[2] == 64) && (no_time_steps == 200)) {
    *Class = 'A';
    dtref = 0.8e-3;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;
    //---------------------------------------------------------------------
    // reference data for 102X102X102 grids after 200 time steps,
    // with DT = 3.0e-04
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 102) && (grid_points[1] == 102) && (grid_points[2] == 102) && (no_time_steps == 200)) {
    *Class = 'B';
    dtref = 3.0e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;
    //---------------------------------------------------------------------
    // reference data for 162X162X162 grids after 200 time steps,
    // with DT = 1.0e-04
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 162) && (grid_points[1] == 162) && (grid_points[2] == 162) && (no_time_steps == 200)) {
    *Class = 'C';
    dtref = 1.0e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;
    //---------------------------------------------------------------------
    // reference data for 408x408x408 grids after 250 time steps,
    // with DT = 0.2e-04
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 408) && (grid_points[1] == 408) && (grid_points[2] == 408) && (no_time_steps == 250)) {
    *Class = 'D';
    dtref = 0.2e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.2533188551738e+05;
    xcrref[1] = 0.2346393716980e+04;
    xcrref[2] = 0.6294554366904e+04;
    xcrref[3] = 0.5352565376030e+04;
    xcrref[4] = 0.3905864038618e+05;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.3100009377557e+03;
    xceref[1] = 0.2424086324913e+02;
    xceref[2] = 0.7782212022645e+02;
    xceref[3] = 0.6835623860116e+02;
    xceref[4] = 0.6065737200368e+03;
    //---------------------------------------------------------------------
    // reference data for 1020x1020x1020 grids after 250 time steps,
    // with DT = 0.4e-05
    //---------------------------------------------------------------------
  }
  else if((grid_points[0] == 1020) && (grid_points[1] == 1020) && (grid_points[2] == 1020) && (no_time_steps == 250)) {
    *Class = 'E';
    dtref = 0.4e-5;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.9795372484517e+05;
    xcrref[1] = 0.9739814511521e+04;
    xcrref[2] = 0.2467606342965e+05;
    xcrref[3] = 0.2092419572860e+05;
    xcrref[4] = 0.1392138856939e+06;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.4327562208414e+03;
    xceref[1] = 0.3699051964887e+02;
    xceref[2] = 0.1089845040954e+03;
    xceref[3] = 0.9462517622043e+02;
    xceref[4] = 0.7765512765309e+03;
  }
  else {
    // grid/time-step combination matches no known class
    *verified = 0;
  }
  //---------------------------------------------------------------------
  // verification test for residuals if gridsize is one of
  // the defined grid sizes above (*Class != 'U')
  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  // Compute the difference of solution values and the known reference values.
  //---------------------------------------------------------------------
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  // relative differences; with the 1.0 defaults above this is safe
  // even for the unknown-class case
  for(m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
    xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
  }
  //---------------------------------------------------------------------
  // Output the comparison of computed results to known cases.
  //---------------------------------------------------------------------
  if(*Class != 'U') {
    printf(" Verification being performed for class %c\n", *Class);
    printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
    // the run's dt must match the reference dt for the comparison
    // to be meaningful; otherwise demote to unknown class
    *verified = (fabs(dt - dtref) <= epsilon);
    if(!(*verified)) {
      *Class = 'U';
      printf(" DT does not match the reference value of %15.8E\n", dtref);
    }
  }
  else {
    printf(" Unknown class\n");
  }
  if(*Class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  }
  else {
    printf(" RMS-norms of residual\n");
  }
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  // residual norms: print-only for unknown class, pass/fail otherwise
  for(m = 0; m < 5; m++) {
    if(*Class == 'U') {
      printf(" %2d%20.13E\n", m + 1, xcr[m]);
    }
    else if(xcrdif[m] <= epsilon) {
      printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
    }
    else {
      *verified = 0;
      printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
    }
  }
  if(*Class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  }
  else {
    printf(" RMS-norms of solution error\n");
  }
  /*************** Clava msgError **************
  Loop Iteration number is too low
  ****************************************/
  // solution-error norms: same pass/fail scheme as the residuals
  for(m = 0; m < 5; m++) {
    if(*Class == 'U') {
      printf(" %2d%20.13E\n", m + 1, xce[m]);
    }
    else if(xcedif[m] <= epsilon) {
      printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
    }
    else {
      *verified = 0;
      printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
    }
  }
  if(*Class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  }
  else if(*verified) {
    printf(" Verification Successful\n");
  }
  else {
    printf(" Verification failed\n");
  }
}
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknow
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve() {
int i, j, k, m, n, isize;
double fjac[25][5][5];
double njac[25][5][5];
double lhs[25][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0] - 1;
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(k = 1; k <= grid_points[2] - 2; k++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, tmp1, tmp2, tmp3) firstprivate(isize, k, j, c2, c1, con43, c3c4, c1345, rho_i, u, qs, square)
for(i = 0; i <= isize; i++) {
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjac[i][0][0] = 0.0;
fjac[i][1][0] = 1.0;
fjac[i][2][0] = 0.0;
fjac[i][3][0] = 0.0;
fjac[i][4][0] = 0.0;
fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1]) + c2 * qs[k][j][i];
fjac[i][1][1] = (2.0 - c2) * (u[k][j][i][1] / u[k][j][i][0]);
fjac[i][2][1] = -c2 * (u[k][j][i][2] * tmp1);
fjac[i][3][1] = -c2 * (u[k][j][i][3] * tmp1);
fjac[i][4][1] = c2;
fjac[i][0][2] = -(u[k][j][i][1] * u[k][j][i][2]) * tmp2;
fjac[i][1][2] = u[k][j][i][2] * tmp1;
fjac[i][2][2] = u[k][j][i][1] * tmp1;
fjac[i][3][2] = 0.0;
fjac[i][4][2] = 0.0;
fjac[i][0][3] = -(u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[i][1][3] = u[k][j][i][3] * tmp1;
fjac[i][2][3] = 0.0;
fjac[i][3][3] = u[k][j][i][1] * tmp1;
fjac[i][4][3] = 0.0;
fjac[i][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * (u[k][j][i][1] * tmp2);
fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1 - c2 * (u[k][j][i][1] * u[k][j][i][1] * tmp2 + qs[k][j][i]);
fjac[i][2][4] = -c2 * (u[k][j][i][2] * u[k][j][i][1]) * tmp2;
fjac[i][3][4] = -c2 * (u[k][j][i][3] * u[k][j][i][1]) * tmp2;
fjac[i][4][4] = c1 * (u[k][j][i][1] * tmp1);
njac[i][0][0] = 0.0;
njac[i][1][0] = 0.0;
njac[i][2][0] = 0.0;
njac[i][3][0] = 0.0;
njac[i][4][0] = 0.0;
njac[i][0][1] = -con43 * c3c4 * tmp2 * u[k][j][i][1];
njac[i][1][1] = con43 * c3c4 * tmp1;
njac[i][2][1] = 0.0;
njac[i][3][1] = 0.0;
njac[i][4][1] = 0.0;
njac[i][0][2] = -c3c4 * tmp2 * u[k][j][i][2];
njac[i][1][2] = 0.0;
njac[i][2][2] = c3c4 * tmp1;
njac[i][3][2] = 0.0;
njac[i][4][2] = 0.0;
njac[i][0][3] = -c3c4 * tmp2 * u[k][j][i][3];
njac[i][1][3] = 0.0;
njac[i][2][3] = 0.0;
njac[i][3][3] = c3c4 * tmp1;
njac[i][4][3] = 0.0;
njac[i][0][4] = -(con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[i][1][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[i][2][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[i][3][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[i][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
lhsinit(lhs, isize);
#pragma omp parallel for default(shared) private(i, tmp1, tmp2) firstprivate(isize, dt, tx1, tx2, dx1, dx2, dx3, dx4, dx5, fjac, njac)
for(i = 1; i <= isize - 1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][0][0][0] = -tmp2 * fjac[i - 1][0][0] - tmp1 * njac[i - 1][0][0] - tmp1 * dx1;
lhs[i][0][1][0] = -tmp2 * fjac[i - 1][1][0] - tmp1 * njac[i - 1][1][0];
lhs[i][0][2][0] = -tmp2 * fjac[i - 1][2][0] - tmp1 * njac[i - 1][2][0];
lhs[i][0][3][0] = -tmp2 * fjac[i - 1][3][0] - tmp1 * njac[i - 1][3][0];
lhs[i][0][4][0] = -tmp2 * fjac[i - 1][4][0] - tmp1 * njac[i - 1][4][0];
lhs[i][0][0][1] = -tmp2 * fjac[i - 1][0][1] - tmp1 * njac[i - 1][0][1];
lhs[i][0][1][1] = -tmp2 * fjac[i - 1][1][1] - tmp1 * njac[i - 1][1][1] - tmp1 * dx2;
lhs[i][0][2][1] = -tmp2 * fjac[i - 1][2][1] - tmp1 * njac[i - 1][2][1];
lhs[i][0][3][1] = -tmp2 * fjac[i - 1][3][1] - tmp1 * njac[i - 1][3][1];
lhs[i][0][4][1] = -tmp2 * fjac[i - 1][4][1] - tmp1 * njac[i - 1][4][1];
lhs[i][0][0][2] = -tmp2 * fjac[i - 1][0][2] - tmp1 * njac[i - 1][0][2];
lhs[i][0][1][2] = -tmp2 * fjac[i - 1][1][2] - tmp1 * njac[i - 1][1][2];
lhs[i][0][2][2] = -tmp2 * fjac[i - 1][2][2] - tmp1 * njac[i - 1][2][2] - tmp1 * dx3;
lhs[i][0][3][2] = -tmp2 * fjac[i - 1][3][2] - tmp1 * njac[i - 1][3][2];
lhs[i][0][4][2] = -tmp2 * fjac[i - 1][4][2] - tmp1 * njac[i - 1][4][2];
lhs[i][0][0][3] = -tmp2 * fjac[i - 1][0][3] - tmp1 * njac[i - 1][0][3];
lhs[i][0][1][3] = -tmp2 * fjac[i - 1][1][3] - tmp1 * njac[i - 1][1][3];
lhs[i][0][2][3] = -tmp2 * fjac[i - 1][2][3] - tmp1 * njac[i - 1][2][3];
lhs[i][0][3][3] = -tmp2 * fjac[i - 1][3][3] - tmp1 * njac[i - 1][3][3] - tmp1 * dx4;
lhs[i][0][4][3] = -tmp2 * fjac[i - 1][4][3] - tmp1 * njac[i - 1][4][3];
lhs[i][0][0][4] = -tmp2 * fjac[i - 1][0][4] - tmp1 * njac[i - 1][0][4];
lhs[i][0][1][4] = -tmp2 * fjac[i - 1][1][4] - tmp1 * njac[i - 1][1][4];
lhs[i][0][2][4] = -tmp2 * fjac[i - 1][2][4] - tmp1 * njac[i - 1][2][4];
lhs[i][0][3][4] = -tmp2 * fjac[i - 1][3][4] - tmp1 * njac[i - 1][3][4];
lhs[i][0][4][4] = -tmp2 * fjac[i - 1][4][4] - tmp1 * njac[i - 1][4][4] - tmp1 * dx5;
lhs[i][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][0][0] + tmp1 * 2.0 * dx1;
lhs[i][1][1][0] = tmp1 * 2.0 * njac[i][1][0];
lhs[i][1][2][0] = tmp1 * 2.0 * njac[i][2][0];
lhs[i][1][3][0] = tmp1 * 2.0 * njac[i][3][0];
lhs[i][1][4][0] = tmp1 * 2.0 * njac[i][4][0];
lhs[i][1][0][1] = tmp1 * 2.0 * njac[i][0][1];
lhs[i][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][1][1] + tmp1 * 2.0 * dx2;
lhs[i][1][2][1] = tmp1 * 2.0 * njac[i][2][1];
lhs[i][1][3][1] = tmp1 * 2.0 * njac[i][3][1];
lhs[i][1][4][1] = tmp1 * 2.0 * njac[i][4][1];
lhs[i][1][0][2] = tmp1 * 2.0 * njac[i][0][2];
lhs[i][1][1][2] = tmp1 * 2.0 * njac[i][1][2];
lhs[i][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][2][2] + tmp1 * 2.0 * dx3;
lhs[i][1][3][2] = tmp1 * 2.0 * njac[i][3][2];
lhs[i][1][4][2] = tmp1 * 2.0 * njac[i][4][2];
lhs[i][1][0][3] = tmp1 * 2.0 * njac[i][0][3];
lhs[i][1][1][3] = tmp1 * 2.0 * njac[i][1][3];
lhs[i][1][2][3] = tmp1 * 2.0 * njac[i][2][3];
lhs[i][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][3][3] + tmp1 * 2.0 * dx4;
lhs[i][1][4][3] = tmp1 * 2.0 * njac[i][4][3];
lhs[i][1][0][4] = tmp1 * 2.0 * njac[i][0][4];
lhs[i][1][1][4] = tmp1 * 2.0 * njac[i][1][4];
lhs[i][1][2][4] = tmp1 * 2.0 * njac[i][2][4];
lhs[i][1][3][4] = tmp1 * 2.0 * njac[i][3][4];
lhs[i][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][4][4] + tmp1 * 2.0 * dx5;
lhs[i][2][0][0] = tmp2 * fjac[i + 1][0][0] - tmp1 * njac[i + 1][0][0] - tmp1 * dx1;
lhs[i][2][1][0] = tmp2 * fjac[i + 1][1][0] - tmp1 * njac[i + 1][1][0];
lhs[i][2][2][0] = tmp2 * fjac[i + 1][2][0] - tmp1 * njac[i + 1][2][0];
lhs[i][2][3][0] = tmp2 * fjac[i + 1][3][0] - tmp1 * njac[i + 1][3][0];
lhs[i][2][4][0] = tmp2 * fjac[i + 1][4][0] - tmp1 * njac[i + 1][4][0];
lhs[i][2][0][1] = tmp2 * fjac[i + 1][0][1] - tmp1 * njac[i + 1][0][1];
lhs[i][2][1][1] = tmp2 * fjac[i + 1][1][1] - tmp1 * njac[i + 1][1][1] - tmp1 * dx2;
lhs[i][2][2][1] = tmp2 * fjac[i + 1][2][1] - tmp1 * njac[i + 1][2][1];
lhs[i][2][3][1] = tmp2 * fjac[i + 1][3][1] - tmp1 * njac[i + 1][3][1];
lhs[i][2][4][1] = tmp2 * fjac[i + 1][4][1] - tmp1 * njac[i + 1][4][1];
lhs[i][2][0][2] = tmp2 * fjac[i + 1][0][2] - tmp1 * njac[i + 1][0][2];
lhs[i][2][1][2] = tmp2 * fjac[i + 1][1][2] - tmp1 * njac[i + 1][1][2];
lhs[i][2][2][2] = tmp2 * fjac[i + 1][2][2] - tmp1 * njac[i + 1][2][2] - tmp1 * dx3;
lhs[i][2][3][2] = tmp2 * fjac[i + 1][3][2] - tmp1 * njac[i + 1][3][2];
lhs[i][2][4][2] = tmp2 * fjac[i + 1][4][2] - tmp1 * njac[i + 1][4][2];
lhs[i][2][0][3] = tmp2 * fjac[i + 1][0][3] - tmp1 * njac[i + 1][0][3];
lhs[i][2][1][3] = tmp2 * fjac[i + 1][1][3] - tmp1 * njac[i + 1][1][3];
lhs[i][2][2][3] = tmp2 * fjac[i + 1][2][3] - tmp1 * njac[i + 1][2][3];
lhs[i][2][3][3] = tmp2 * fjac[i + 1][3][3] - tmp1 * njac[i + 1][3][3] - tmp1 * dx4;
lhs[i][2][4][3] = tmp2 * fjac[i + 1][4][3] - tmp1 * njac[i + 1][4][3];
lhs[i][2][0][4] = tmp2 * fjac[i + 1][0][4] - tmp1 * njac[i + 1][0][4];
lhs[i][2][1][4] = tmp2 * fjac[i + 1][1][4] - tmp1 * njac[i + 1][1][4];
lhs[i][2][2][4] = tmp2 * fjac[i + 1][2][4] - tmp1 * njac[i + 1][2][4];
lhs[i][2][3][4] = tmp2 * fjac[i + 1][3][4] - tmp1 * njac[i + 1][3][4];
lhs[i][2][4][4] = tmp2 * fjac[i + 1][4][4] - tmp1 * njac[i + 1][4][4] - tmp1 * dx5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[k][j][0]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = 1; i <= isize - 1; i++) {
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
matvec_sub(lhs[i][0], rhs[k][j][i - 1], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
matmul_sub(lhs[i][0], lhs[i - 1][2], lhs[i][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[i][1], lhs[i][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[isize][0], rhs[k][j][isize - 1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
matmul_sub(lhs[isize][0], lhs[isize - 1][2], lhs[isize][1]);
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[isize][1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = isize - 1; i >= 0; i--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[i][2][n][m] * rhs[k][j][i + 1][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Y direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void y_solve() {
int i, j, k, m, n, jsize;
// Per-line scratch: flux Jacobian (fjac), viscous Jacobian (njac), and the
// three 5x5 block rows (sub/diag/super) of the tridiagonal system (lhs).
// Capacity 25 bounds the sweep length; assumes grid_points[1] <= 25 — TODO confirm
// against the problem-class sizing used elsewhere in this file.
double fjac[25][5][5];
double njac[25][5][5];
double lhs[25][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
// Builds and solves, for every (k,i) pencil, a block-tridiagonal system
// along j, factoring forward then back-substituting into rhs.
// This function computes the left hand side for the three y-factors.
//---------------------------------------------------------------------
jsize = grid_points[1] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the tri-diagonal matrix;
// determine a (labeled f) and n jacobians for cell c
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(k = 1; k <= grid_points[2] - 2; k++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(i = 1; i <= grid_points[0] - 2; i++) {
// Fill the Jacobians for the whole j-line. Iterations are independent
// (each j writes only fjac[j]/njac[j]), hence the parallel for.
#pragma omp parallel for default(shared) private(j, tmp1, tmp2, tmp3) firstprivate(jsize, k, i, c2, c1, c3c4, con43, c1345, rho_i, u, qs, square)
for(j = 0; j <= jsize; j++) {
// tmp1 is presumably the reciprocal density 1/u[..][0] cached in rho_i
// (x/z sweeps compute 1.0/u[k][j][i][0] directly) — verify against the
// routine that fills rho_i.
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
// Convective (flux) Jacobian dF_y/dU; note the transposed storage
// fjac[j][col][row] used consistently by the consumers below.
fjac[j][0][0] = 0.0;
fjac[j][1][0] = 0.0;
fjac[j][2][0] = 1.0;
fjac[j][3][0] = 0.0;
fjac[j][4][0] = 0.0;
fjac[j][0][1] = -(u[k][j][i][1] * u[k][j][i][2]) * tmp2;
fjac[j][1][1] = u[k][j][i][2] * tmp1;
fjac[j][2][1] = u[k][j][i][1] * tmp1;
fjac[j][3][1] = 0.0;
fjac[j][4][1] = 0.0;
fjac[j][0][2] = -(u[k][j][i][2] * u[k][j][i][2] * tmp2) + c2 * qs[k][j][i];
fjac[j][1][2] = -c2 * u[k][j][i][1] * tmp1;
fjac[j][2][2] = (2.0 - c2) * u[k][j][i][2] * tmp1;
fjac[j][3][2] = -c2 * u[k][j][i][3] * tmp1;
fjac[j][4][2] = c2;
fjac[j][0][3] = -(u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[j][1][3] = 0.0;
fjac[j][2][3] = u[k][j][i][3] * tmp1;
fjac[j][3][3] = u[k][j][i][2] * tmp1;
fjac[j][4][3] = 0.0;
fjac[j][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * u[k][j][i][2] * tmp2;
fjac[j][1][4] = -c2 * u[k][j][i][1] * u[k][j][i][2] * tmp2;
fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1 - c2 * (qs[k][j][i] + u[k][j][i][2] * u[k][j][i][2] * tmp2);
fjac[j][3][4] = -c2 * (u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;
// Viscous Jacobian; the y-momentum row (index 2) carries the extra
// con43 factor in this sweep direction.
njac[j][0][0] = 0.0;
njac[j][1][0] = 0.0;
njac[j][2][0] = 0.0;
njac[j][3][0] = 0.0;
njac[j][4][0] = 0.0;
njac[j][0][1] = -c3c4 * tmp2 * u[k][j][i][1];
njac[j][1][1] = c3c4 * tmp1;
njac[j][2][1] = 0.0;
njac[j][3][1] = 0.0;
njac[j][4][1] = 0.0;
njac[j][0][2] = -con43 * c3c4 * tmp2 * u[k][j][i][2];
njac[j][1][2] = 0.0;
njac[j][2][2] = con43 * c3c4 * tmp1;
njac[j][3][2] = 0.0;
njac[j][4][2] = 0.0;
njac[j][0][3] = -c3c4 * tmp2 * u[k][j][i][3];
njac[j][1][3] = 0.0;
njac[j][2][3] = 0.0;
njac[j][3][3] = c3c4 * tmp1;
njac[j][4][3] = 0.0;
njac[j][0][4] = -(c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[j][1][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[j][2][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[j][3][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[j][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in y direction:
// lhs[j][0] = sub-diagonal A, lhs[j][1] = diagonal B (with identity),
// lhs[j][2] = super-diagonal C. lhsinit sets the boundary rows.
//---------------------------------------------------------------------
lhsinit(lhs, jsize);
// Each j writes only lhs[j] and reads fjac/njac at j-1, j, j+1, which
// were fully written above — so the iterations are independent.
// firstprivate(fjac, njac) copies both scratch arrays per thread.
#pragma omp parallel for default(shared) private(j, tmp1, tmp2) firstprivate(jsize, dt, ty1, ty2, dy1, dy2, dy3, dy4, dy5, fjac, njac)
for(j = 1; j <= jsize - 1; j++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
// Sub-diagonal block: -dt*(ty2*F' + ty1*N')(j-1), minus dissipation
// dy1..dy5 on the diagonal entries.
lhs[j][0][0][0] = -tmp2 * fjac[j - 1][0][0] - tmp1 * njac[j - 1][0][0] - tmp1 * dy1;
lhs[j][0][1][0] = -tmp2 * fjac[j - 1][1][0] - tmp1 * njac[j - 1][1][0];
lhs[j][0][2][0] = -tmp2 * fjac[j - 1][2][0] - tmp1 * njac[j - 1][2][0];
lhs[j][0][3][0] = -tmp2 * fjac[j - 1][3][0] - tmp1 * njac[j - 1][3][0];
lhs[j][0][4][0] = -tmp2 * fjac[j - 1][4][0] - tmp1 * njac[j - 1][4][0];
lhs[j][0][0][1] = -tmp2 * fjac[j - 1][0][1] - tmp1 * njac[j - 1][0][1];
lhs[j][0][1][1] = -tmp2 * fjac[j - 1][1][1] - tmp1 * njac[j - 1][1][1] - tmp1 * dy2;
lhs[j][0][2][1] = -tmp2 * fjac[j - 1][2][1] - tmp1 * njac[j - 1][2][1];
lhs[j][0][3][1] = -tmp2 * fjac[j - 1][3][1] - tmp1 * njac[j - 1][3][1];
lhs[j][0][4][1] = -tmp2 * fjac[j - 1][4][1] - tmp1 * njac[j - 1][4][1];
lhs[j][0][0][2] = -tmp2 * fjac[j - 1][0][2] - tmp1 * njac[j - 1][0][2];
lhs[j][0][1][2] = -tmp2 * fjac[j - 1][1][2] - tmp1 * njac[j - 1][1][2];
lhs[j][0][2][2] = -tmp2 * fjac[j - 1][2][2] - tmp1 * njac[j - 1][2][2] - tmp1 * dy3;
lhs[j][0][3][2] = -tmp2 * fjac[j - 1][3][2] - tmp1 * njac[j - 1][3][2];
lhs[j][0][4][2] = -tmp2 * fjac[j - 1][4][2] - tmp1 * njac[j - 1][4][2];
lhs[j][0][0][3] = -tmp2 * fjac[j - 1][0][3] - tmp1 * njac[j - 1][0][3];
lhs[j][0][1][3] = -tmp2 * fjac[j - 1][1][3] - tmp1 * njac[j - 1][1][3];
lhs[j][0][2][3] = -tmp2 * fjac[j - 1][2][3] - tmp1 * njac[j - 1][2][3];
lhs[j][0][3][3] = -tmp2 * fjac[j - 1][3][3] - tmp1 * njac[j - 1][3][3] - tmp1 * dy4;
lhs[j][0][4][3] = -tmp2 * fjac[j - 1][4][3] - tmp1 * njac[j - 1][4][3];
lhs[j][0][0][4] = -tmp2 * fjac[j - 1][0][4] - tmp1 * njac[j - 1][0][4];
lhs[j][0][1][4] = -tmp2 * fjac[j - 1][1][4] - tmp1 * njac[j - 1][1][4];
lhs[j][0][2][4] = -tmp2 * fjac[j - 1][2][4] - tmp1 * njac[j - 1][2][4];
lhs[j][0][3][4] = -tmp2 * fjac[j - 1][3][4] - tmp1 * njac[j - 1][3][4];
lhs[j][0][4][4] = -tmp2 * fjac[j - 1][4][4] - tmp1 * njac[j - 1][4][4] - tmp1 * dy5;
// Diagonal block: I + 2*dt*ty1*N(j) + 2*dt*ty1*dy* on the diagonal
// (the convective terms at j cancel in the centered scheme).
lhs[j][1][0][0] = 1.0 + tmp1 * 2.0 * njac[j][0][0] + tmp1 * 2.0 * dy1;
lhs[j][1][1][0] = tmp1 * 2.0 * njac[j][1][0];
lhs[j][1][2][0] = tmp1 * 2.0 * njac[j][2][0];
lhs[j][1][3][0] = tmp1 * 2.0 * njac[j][3][0];
lhs[j][1][4][0] = tmp1 * 2.0 * njac[j][4][0];
lhs[j][1][0][1] = tmp1 * 2.0 * njac[j][0][1];
lhs[j][1][1][1] = 1.0 + tmp1 * 2.0 * njac[j][1][1] + tmp1 * 2.0 * dy2;
lhs[j][1][2][1] = tmp1 * 2.0 * njac[j][2][1];
lhs[j][1][3][1] = tmp1 * 2.0 * njac[j][3][1];
lhs[j][1][4][1] = tmp1 * 2.0 * njac[j][4][1];
lhs[j][1][0][2] = tmp1 * 2.0 * njac[j][0][2];
lhs[j][1][1][2] = tmp1 * 2.0 * njac[j][1][2];
lhs[j][1][2][2] = 1.0 + tmp1 * 2.0 * njac[j][2][2] + tmp1 * 2.0 * dy3;
lhs[j][1][3][2] = tmp1 * 2.0 * njac[j][3][2];
lhs[j][1][4][2] = tmp1 * 2.0 * njac[j][4][2];
lhs[j][1][0][3] = tmp1 * 2.0 * njac[j][0][3];
lhs[j][1][1][3] = tmp1 * 2.0 * njac[j][1][3];
lhs[j][1][2][3] = tmp1 * 2.0 * njac[j][2][3];
lhs[j][1][3][3] = 1.0 + tmp1 * 2.0 * njac[j][3][3] + tmp1 * 2.0 * dy4;
lhs[j][1][4][3] = tmp1 * 2.0 * njac[j][4][3];
lhs[j][1][0][4] = tmp1 * 2.0 * njac[j][0][4];
lhs[j][1][1][4] = tmp1 * 2.0 * njac[j][1][4];
lhs[j][1][2][4] = tmp1 * 2.0 * njac[j][2][4];
lhs[j][1][3][4] = tmp1 * 2.0 * njac[j][3][4];
lhs[j][1][4][4] = 1.0 + tmp1 * 2.0 * njac[j][4][4] + tmp1 * 2.0 * dy5;
// Super-diagonal block: +dt*ty2*F'(j+1) - dt*ty1*N'(j+1), minus
// dissipation on the diagonal entries (mirror of the sub-diagonal).
lhs[j][2][0][0] = tmp2 * fjac[j + 1][0][0] - tmp1 * njac[j + 1][0][0] - tmp1 * dy1;
lhs[j][2][1][0] = tmp2 * fjac[j + 1][1][0] - tmp1 * njac[j + 1][1][0];
lhs[j][2][2][0] = tmp2 * fjac[j + 1][2][0] - tmp1 * njac[j + 1][2][0];
lhs[j][2][3][0] = tmp2 * fjac[j + 1][3][0] - tmp1 * njac[j + 1][3][0];
lhs[j][2][4][0] = tmp2 * fjac[j + 1][4][0] - tmp1 * njac[j + 1][4][0];
lhs[j][2][0][1] = tmp2 * fjac[j + 1][0][1] - tmp1 * njac[j + 1][0][1];
lhs[j][2][1][1] = tmp2 * fjac[j + 1][1][1] - tmp1 * njac[j + 1][1][1] - tmp1 * dy2;
lhs[j][2][2][1] = tmp2 * fjac[j + 1][2][1] - tmp1 * njac[j + 1][2][1];
lhs[j][2][3][1] = tmp2 * fjac[j + 1][3][1] - tmp1 * njac[j + 1][3][1];
lhs[j][2][4][1] = tmp2 * fjac[j + 1][4][1] - tmp1 * njac[j + 1][4][1];
lhs[j][2][0][2] = tmp2 * fjac[j + 1][0][2] - tmp1 * njac[j + 1][0][2];
lhs[j][2][1][2] = tmp2 * fjac[j + 1][1][2] - tmp1 * njac[j + 1][1][2];
lhs[j][2][2][2] = tmp2 * fjac[j + 1][2][2] - tmp1 * njac[j + 1][2][2] - tmp1 * dy3;
lhs[j][2][3][2] = tmp2 * fjac[j + 1][3][2] - tmp1 * njac[j + 1][3][2];
lhs[j][2][4][2] = tmp2 * fjac[j + 1][4][2] - tmp1 * njac[j + 1][4][2];
lhs[j][2][0][3] = tmp2 * fjac[j + 1][0][3] - tmp1 * njac[j + 1][0][3];
lhs[j][2][1][3] = tmp2 * fjac[j + 1][1][3] - tmp1 * njac[j + 1][1][3];
lhs[j][2][2][3] = tmp2 * fjac[j + 1][2][3] - tmp1 * njac[j + 1][2][3];
lhs[j][2][3][3] = tmp2 * fjac[j + 1][3][3] - tmp1 * njac[j + 1][3][3] - tmp1 * dy4;
lhs[j][2][4][3] = tmp2 * fjac[j + 1][4][3] - tmp1 * njac[j + 1][4][3];
lhs[j][2][0][4] = tmp2 * fjac[j + 1][0][4] - tmp1 * njac[j + 1][0][4];
lhs[j][2][1][4] = tmp2 * fjac[j + 1][1][4] - tmp1 * njac[j + 1][1][4];
lhs[j][2][2][4] = tmp2 * fjac[j + 1][2][4] - tmp1 * njac[j + 1][2][4];
lhs[j][2][3][4] = tmp2 * fjac[j + 1][3][4] - tmp1 * njac[j + 1][3][4];
lhs[j][2][4][4] = tmp2 * fjac[j + 1][4][4] - tmp1 * njac[j + 1][4][4] - tmp1 * dy5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(JMAX) and rhs'(JMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][0][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[k][0][i]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
// (sequential: each iteration consumes the factorization of j-1)
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(j = 1; j <= jsize - 1; j++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(j-1) from lhs_vector(j)
//
// rhs(j) = rhs(j) - A*rhs(j-1)
//-------------------------------------------------------------------
matvec_sub(lhs[j][0], rhs[k][j - 1][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(j) = B(j) - C(j-1)*A(j)
//-------------------------------------------------------------------
matmul_sub(lhs[j][0], lhs[j - 1][2], lhs[j][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][0][i] by b_inverse[k][0][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[j][1], lhs[j][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[jsize][0], rhs[k][jsize - 1][i], rhs[k][jsize][i]);
//---------------------------------------------------------------------
// B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
// matmul_sub(AA,i,jsize,k,c,
// $ CC,i,jsize-1,k,c,BB,i,jsize,k)
//---------------------------------------------------------------------
matmul_sub(lhs[jsize][0], lhs[jsize - 1][2], lhs[jsize][1]);
//---------------------------------------------------------------------
// multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[jsize][1], rhs[k][jsize][i]);
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(jsize)=rhs(jsize)
// else assume U(jsize) is loaded in un pack backsub_info
// so just use it
// after u(jstart) will be sent to next cell
// (sequential in descending j: each row needs rhs at j+1)
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(j = jsize - 1; j >= 0; j--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
// rhs(j) -= C(j) * rhs(j+1), using the transposed [n][m] layout.
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][2][n][m] * rhs[k][j + 1][i][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve() {
int i, j, k, m, n, ksize;
double fjac[25][5][5];
double njac[25][5][5];
double lhs[25][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(j = 1; j <= grid_points[1] - 2; j++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(i = 1; i <= grid_points[0] - 2; i++) {
#pragma omp parallel for default(shared) private(k, tmp1, tmp2, tmp3) firstprivate(ksize, j, i, c2, c1, c3c4, con43, c3, c4, c1345, u, qs, square)
for(k = 0; k <= ksize; k++) {
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = -(u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = -(u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = -(u[k][j][i][3] * u[k][j][i][3] * tmp2) + c2 * qs[k][j][i];
fjac[k][1][3] = -c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = -c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = (2.0 - c2) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * u[k][j][i][3] * tmp2;
fjac[k][1][4] = -c2 * (u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[k][2][4] = -c2 * (u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[k][3][4] = c1 * (u[k][j][i][4] * tmp1) - c2 * (qs[k][j][i] + u[k][j][i][3] * u[k][j][i][3] * tmp2);
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = -c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = -c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = -con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
njac[k][3][3] = con43 * c3 * c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = -(c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[k][2][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[k][3][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[k][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
#pragma omp parallel for default(shared) private(k, tmp1, tmp2) firstprivate(ksize, dt, tz1, tz2, dz1, dz2, dz3, dz4, dz5, fjac, njac)
for(k = 1; k <= ksize - 1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][0][0][0] = -tmp2 * fjac[k - 1][0][0] - tmp1 * njac[k - 1][0][0] - tmp1 * dz1;
lhs[k][0][1][0] = -tmp2 * fjac[k - 1][1][0] - tmp1 * njac[k - 1][1][0];
lhs[k][0][2][0] = -tmp2 * fjac[k - 1][2][0] - tmp1 * njac[k - 1][2][0];
lhs[k][0][3][0] = -tmp2 * fjac[k - 1][3][0] - tmp1 * njac[k - 1][3][0];
lhs[k][0][4][0] = -tmp2 * fjac[k - 1][4][0] - tmp1 * njac[k - 1][4][0];
lhs[k][0][0][1] = -tmp2 * fjac[k - 1][0][1] - tmp1 * njac[k - 1][0][1];
lhs[k][0][1][1] = -tmp2 * fjac[k - 1][1][1] - tmp1 * njac[k - 1][1][1] - tmp1 * dz2;
lhs[k][0][2][1] = -tmp2 * fjac[k - 1][2][1] - tmp1 * njac[k - 1][2][1];
lhs[k][0][3][1] = -tmp2 * fjac[k - 1][3][1] - tmp1 * njac[k - 1][3][1];
lhs[k][0][4][1] = -tmp2 * fjac[k - 1][4][1] - tmp1 * njac[k - 1][4][1];
lhs[k][0][0][2] = -tmp2 * fjac[k - 1][0][2] - tmp1 * njac[k - 1][0][2];
lhs[k][0][1][2] = -tmp2 * fjac[k - 1][1][2] - tmp1 * njac[k - 1][1][2];
lhs[k][0][2][2] = -tmp2 * fjac[k - 1][2][2] - tmp1 * njac[k - 1][2][2] - tmp1 * dz3;
lhs[k][0][3][2] = -tmp2 * fjac[k - 1][3][2] - tmp1 * njac[k - 1][3][2];
lhs[k][0][4][2] = -tmp2 * fjac[k - 1][4][2] - tmp1 * njac[k - 1][4][2];
lhs[k][0][0][3] = -tmp2 * fjac[k - 1][0][3] - tmp1 * njac[k - 1][0][3];
lhs[k][0][1][3] = -tmp2 * fjac[k - 1][1][3] - tmp1 * njac[k - 1][1][3];
lhs[k][0][2][3] = -tmp2 * fjac[k - 1][2][3] - tmp1 * njac[k - 1][2][3];
lhs[k][0][3][3] = -tmp2 * fjac[k - 1][3][3] - tmp1 * njac[k - 1][3][3] - tmp1 * dz4;
lhs[k][0][4][3] = -tmp2 * fjac[k - 1][4][3] - tmp1 * njac[k - 1][4][3];
lhs[k][0][0][4] = -tmp2 * fjac[k - 1][0][4] - tmp1 * njac[k - 1][0][4];
lhs[k][0][1][4] = -tmp2 * fjac[k - 1][1][4] - tmp1 * njac[k - 1][1][4];
lhs[k][0][2][4] = -tmp2 * fjac[k - 1][2][4] - tmp1 * njac[k - 1][2][4];
lhs[k][0][3][4] = -tmp2 * fjac[k - 1][3][4] - tmp1 * njac[k - 1][3][4];
lhs[k][0][4][4] = -tmp2 * fjac[k - 1][4][4] - tmp1 * njac[k - 1][4][4] - tmp1 * dz5;
lhs[k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[k][0][0] + tmp1 * 2.0 * dz1;
lhs[k][1][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][1][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][1][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][1][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][1][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[k][1][1] + tmp1 * 2.0 * dz2;
lhs[k][1][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][1][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][1][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][1][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][1][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[k][2][2] + tmp1 * 2.0 * dz3;
lhs[k][1][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][1][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][1][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][1][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][1][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[k][3][3] + tmp1 * 2.0 * dz4;
lhs[k][1][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][1][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][1][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][1][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][1][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[k][4][4] + tmp1 * 2.0 * dz5;
lhs[k][2][0][0] = tmp2 * fjac[k + 1][0][0] - tmp1 * njac[k + 1][0][0] - tmp1 * dz1;
lhs[k][2][1][0] = tmp2 * fjac[k + 1][1][0] - tmp1 * njac[k + 1][1][0];
lhs[k][2][2][0] = tmp2 * fjac[k + 1][2][0] - tmp1 * njac[k + 1][2][0];
lhs[k][2][3][0] = tmp2 * fjac[k + 1][3][0] - tmp1 * njac[k + 1][3][0];
lhs[k][2][4][0] = tmp2 * fjac[k + 1][4][0] - tmp1 * njac[k + 1][4][0];
lhs[k][2][0][1] = tmp2 * fjac[k + 1][0][1] - tmp1 * njac[k + 1][0][1];
lhs[k][2][1][1] = tmp2 * fjac[k + 1][1][1] - tmp1 * njac[k + 1][1][1] - tmp1 * dz2;
lhs[k][2][2][1] = tmp2 * fjac[k + 1][2][1] - tmp1 * njac[k + 1][2][1];
lhs[k][2][3][1] = tmp2 * fjac[k + 1][3][1] - tmp1 * njac[k + 1][3][1];
lhs[k][2][4][1] = tmp2 * fjac[k + 1][4][1] - tmp1 * njac[k + 1][4][1];
lhs[k][2][0][2] = tmp2 * fjac[k + 1][0][2] - tmp1 * njac[k + 1][0][2];
lhs[k][2][1][2] = tmp2 * fjac[k + 1][1][2] - tmp1 * njac[k + 1][1][2];
lhs[k][2][2][2] = tmp2 * fjac[k + 1][2][2] - tmp1 * njac[k + 1][2][2] - tmp1 * dz3;
lhs[k][2][3][2] = tmp2 * fjac[k + 1][3][2] - tmp1 * njac[k + 1][3][2];
lhs[k][2][4][2] = tmp2 * fjac[k + 1][4][2] - tmp1 * njac[k + 1][4][2];
lhs[k][2][0][3] = tmp2 * fjac[k + 1][0][3] - tmp1 * njac[k + 1][0][3];
lhs[k][2][1][3] = tmp2 * fjac[k + 1][1][3] - tmp1 * njac[k + 1][1][3];
lhs[k][2][2][3] = tmp2 * fjac[k + 1][2][3] - tmp1 * njac[k + 1][2][3];
lhs[k][2][3][3] = tmp2 * fjac[k + 1][3][3] - tmp1 * njac[k + 1][3][3] - tmp1 * dz4;
lhs[k][2][4][3] = tmp2 * fjac[k + 1][4][3] - tmp1 * njac[k + 1][4][3];
lhs[k][2][0][4] = tmp2 * fjac[k + 1][0][4] - tmp1 * njac[k + 1][0][4];
lhs[k][2][1][4] = tmp2 * fjac[k + 1][1][4] - tmp1 * njac[k + 1][1][4];
lhs[k][2][2][4] = tmp2 * fjac[k + 1][2][4] - tmp1 * njac[k + 1][2][4];
lhs[k][2][3][4] = tmp2 * fjac[k + 1][3][4] - tmp1 * njac[k + 1][3][4];
lhs[k][2][4][4] = tmp2 * fjac[k + 1][4][4] - tmp1 * njac[k + 1][4][4] - tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[0][j][i]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = 1; k <= ksize - 1; k++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(k-1) from lhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][0], rhs[k - 1][j][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(k) = B(k) - C(k-1)*A(k)
// matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)
//-------------------------------------------------------------------
matmul_sub(lhs[k][0], lhs[k - 1][2], lhs[k][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][i] by b_inverse[k][j][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[k][1], lhs[k][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][0], rhs[ksize - 1][j][i], rhs[ksize][j][i]);
//---------------------------------------------------------------------
// B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
// matmul_sub(AA,i,j,ksize,c,
// $ CC,i,j,ksize-1,c,BB,i,j,ksize)
//---------------------------------------------------------------------
matmul_sub(lhs[ksize][0], lhs[ksize - 1][2], lhs[ksize][1]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[ksize][1], rhs[ksize][j][i]);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(ksize)=rhs(ksize)
// else assume U(ksize) is loaded in un pack backsub_info
// so just use it
// after u(kstart) will be sent to next cell
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = ksize - 1; k >= 0; k--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][2][n][m] * rhs[k + 1][j][i][n];
}
}
}
}
}
}
/* Prints the standard NPB benchmark summary to stdout.
 *
 * name     - benchmark name (e.g. "BT"); when it starts with "EP" the
 *            problem size is printed as 2^n1
 * class    - problem class character (printed verbatim)
 * n1,n2,n3 - grid dimensions; n2 == 0 && n3 == 0 means a non-grid benchmark,
 *            in which case only n1 is shown
 * niter    - number of iterations performed
 * t        - elapsed wall-clock time in seconds
 * mops     - measured Mop/s
 * optype   - short description of the operation type
 * verified - nonzero if the result passed verification
 */
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
    char size[16];
    int j;
    printf("\n\n %s Benchmark Completed.\n", name);
    printf(" Class = %12c\n", class);
    // If this is not a grid-based problem (EP, FT, CG), then
    // we only print n1, which contains some measure of the
    // problem size. In that case, n2 and n3 are both zero.
    // Otherwise, we print the grid size n1xn2xn3
    if((n2 == 0) && (n3 == 0)) {
        if((name[0] == 'E') && (name[1] == 'P')) {
            /* FIX: snprintf instead of sprintf -- "%15.0lf" of pow(2.0, n1)
             * can exceed 15 characters for large n1 and overflow size[16]. */
            snprintf(size, sizeof(size), "%15.0lf", pow(2.0, n1));
            j = 14;
            /* strip a trailing '.' left by the fixed-width format */
            if(size[j] == '.') {
                size[j] = ' ';
                j--;
            }
            size[j + 1] = '\0';
            printf(" Size = %15s\n", size);
        }
        else {
            printf(" Size = %12d\n", n1);
        }
    }
    else {
        printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
    }
    printf(" Iterations = %12d\n", niter);
    printf(" Time in seconds = %12.2lf\n", t);
    printf(" Mop/s total = %15.2lf\n", mops);
    printf(" Operation type = %24s\n", optype);
    if(verified) printf(" Verification = %12s\n", "SUCCESSFUL");
    else printf(" Verification = %12s\n", "UNSUCCESSFUL");
}
/* Writes the current wall-clock time, in seconds, into *t.
 * The first call latches tv_sec into a static epoch, so every reading is
 * an offset from the first call -- this keeps the value small enough that
 * the double retains full microsecond precision.
 * NOTE: not thread-safe on the very first call (unsynchronized static). */
void wtime(double *t) {
    static int sec = -1;
    struct timeval tv;
    gettimeofday(&tv, (void *) 0);
    if(sec < 0) sec = tv.tv_sec;
    *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/******            E L A P S E D _ T I M E             ******/
/*****************************************************************/
/* Returns the current wall-clock reading (seconds since the first
 * wtime() call) as a convenience wrapper around wtime(). */
double elapsed_time() {
    double now;
    wtime(&now);
    return now;
}
/*****************************************************************/
/******            T I M E R _ C L E A R               ******/
/*****************************************************************/
/* Resets the accumulated time of timer slot n to zero.
 * `elapsed` is a file-scope array defined elsewhere in this file. */
void timer_clear(int n) {
    elapsed[n] = 0.0;
}
/*****************************************************************/
/******            T I M E R _ S T A R T               ******/
/*****************************************************************/
/* Records the current wall-clock time as the start of timer slot n.
 * `start` is a file-scope array defined elsewhere in this file. */
void timer_start(int n) {
    start[n] = elapsed_time();
}
/*****************************************************************/
/******            T I M E R _ S T O P                 ******/
/*****************************************************************/
/* Adds the interval since the matching timer_start(n) to the running
 * total for timer slot n. */
void timer_stop(int n) {
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/******            T I M E R _ R E A D                 ******/
/*****************************************************************/
/* Returns the total accumulated time (seconds) of timer slot n. */
double timer_read(int n) {
    return (elapsed[n]);
}
|
testOPENMP.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Combinatorial Fibonacci: F(0) = F(1) = 1, F(n) = F(n-1) + F(n-2).
 * Rewritten iteratively (same results as the recursive original,
 * including returning 1 for any n <= 1). */
int fibonacci(int n){
    int prev = 1;
    int curr = 1;
    for(int i = 2; i <= n; i++){
        int next = prev + curr;
        prev = curr;
        curr = next;
    }
    return curr;
}
/* Sums F(0)..F(10) across threads with an OpenMP reduction and
 * prints the total. */
int main(int argc, char *argv[])
{
    int total = 0;
    #pragma omp parallel for reduction (+ : total)
    for (int idx = 0; idx < 11; idx++)
    {
        total += fibonacci(idx);
    }
    printf("%d\n", total);
} /* All threads join master thread and disband */
|
main.c | // C Compiler flag: -fopenmp
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <time.h>
#define N 20
/* Verifies the identity n^2 == sum of the first n odd numbers by
 * computing the sum with an OpenMP parallel reduction. */
int main(int argc, char *argv[])
{
    srand(time(NULL));
    omp_set_dynamic(0); // forbid the OpenMP runtime from changing the thread count at run time
    //omp_set_num_threads(2); // pin the thread count to X
    int threadsCount = omp_get_max_threads(); // queried but otherwise unused
    int n = 210;
    int expected = n * n;
    printf("control sqr is %d\n", expected);

    int oddSum = 0;
    int upperBound = n * 2 + 1;
    #pragma omp parallel for reduction(+:oddSum)
    for (int i = 1; i < upperBound; i += 2)
    {
        oddSum += i;
    }

    printf("sqr is %d\n", oddSum);
    if (expected == oddSum)
    {
        printf("the answer is correct\n");
    }
    return 0;
}
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include "common/Schema.h"
#include <random>
#include <memory>
#include <cstring>
#include "segcore/SegmentGrowing.h"
#include "segcore/SegmentSealed.h"
#include "Constants.h"
#include <boost/algorithm/string/predicate.hpp>
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include <query/SearchOnIndex.h>
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Container for synthetic test data: one raw byte blob per schema field
// (cols_), the equivalent row-major serialization (rows_/raw_), and
// per-row ids and timestamps. Constructed only by DataGen() (friend).
struct GeneratedData {
    std::vector<char> rows_;
    std::vector<aligned_vector<uint8_t>> cols_;
    std::vector<idx_t> row_ids_;
    std::vector<Timestamp> timestamps_;
    RowBasedRawData raw_;

    // Returns a copy of column `index`, reinterpreted as a vector of T.
    template <typename T>
    auto
    get_col(int index) const {
        auto& blob = cols_.at(index);
        std::vector<T> out(blob.size() / sizeof(T));
        memcpy(out.data(), blob.data(), blob.size());
        return out;
    }

    // Returns a mutable typed pointer into column `index` (no copy).
    // Asserts the column holds exactly one T per row.
    template <typename T>
    auto
    get_mutable_col(int index) {
        auto& blob = cols_.at(index);
        assert(blob.size() == row_ids_.size() * sizeof(T));
        return reinterpret_cast<T*>(blob.data());
    }

 private:
    GeneratedData() = default;
    friend GeneratedData
    DataGen(SchemaPtr schema, int64_t N, uint64_t seed);
    void
    generate_rows(int64_t N, SchemaPtr schema);
};
// Serializes the per-field columns into a single row-major byte buffer
// (rows_) and points raw_ at it.
inline void
GeneratedData::generate_rows(int64_t N, SchemaPtr schema) {
    // Prefix-sum the field widths to get each field's byte offset in a row.
    std::vector<int> field_offsets(schema->size() + 1, 0);
    auto field_sizes = schema->get_sizeof_infos();
    std::partial_sum(field_sizes.begin(), field_sizes.end(), field_offsets.begin() + 1);
    const int64_t row_bytes = field_offsets.back();
    assert(row_bytes == schema->get_total_sizeof());

    std::vector<char> packed(row_bytes * N);
    for (int row = 0; row < N; ++row) {
        for (int fid = 0; fid < schema->size(); ++fid) {
            auto width = field_sizes[fid];
            auto src = cols_[fid].data() + row * width;
            auto dst = packed.data() + field_offsets[fid] + row * row_bytes;
            memcpy(dst, src, width);
        }
    }

    rows_ = std::move(packed);
    raw_.raw_data = rows_.data();
    raw_.sizeof_per_row = schema->get_total_sizeof();
    raw_.count = N;
}
// Generates a deterministic synthetic dataset of N rows matching `schema`.
// Column contents by field type:
//  - VECTOR_FLOAT : N(0,1) components (unit-normalized when the field name
//                   starts with "normalized"); each row reseeds its own
//                   engine so the OpenMP loop stays deterministic
//  - VECTOR_BINARY: random bytes (dim must be a multiple of 8)
//  - INT64        : 0..N-1 counter if the name starts with "counter",
//                   otherwise uniform in [0, 2N)
//  - INT32        : uniform in [0, 2N)
//  - FLOAT/DOUBLE : N(0,1)
// Row ids are a shuffled permutation of 0..N-1; timestamps are 0..N-1.
// Throws std::runtime_error on an unsupported field type.
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42) {
    using std::vector;
    std::vector<aligned_vector<uint8_t>> cols;
    std::default_random_engine er(seed);
    std::normal_distribution<> distr(0, 1);
    int offset = 0;  // per-field index; shifts the mean of later float vectors
    // Serialize any typed vector into a raw byte column.
    auto insert_cols = [&cols](auto& data) {
        using T = std::remove_reference_t<decltype(data)>;
        auto len = sizeof(typename T::value_type) * data.size();
        auto ptr = aligned_vector<uint8_t>(len);
        memcpy(ptr.data(), data.data(), len);
        cols.emplace_back(std::move(ptr));
    };
    for (auto& field : schema->get_fields()) {
        switch (field.get_data_type()) {
            case engine::DataType::VECTOR_FLOAT: {
                auto dim = field.get_dim();
                vector<float> final(dim * N);
                bool is_ip = starts_with(field.get_name().get(), "normalized");
#pragma omp parallel for
                for (int n = 0; n < N; ++n) {
                    vector<float> data(dim);
                    float sum = 0;
                    // per-row engine keeps output independent of thread schedule
                    std::default_random_engine er2(seed + n);
                    std::normal_distribution<> distr2(0, 1);
                    for (auto& x : data) {
                        x = distr2(er2) + offset;
                        sum += x * x;
                    }
                    if (is_ip) {
                        sum = sqrt(sum);
                        for (auto& x : data) {
                            x /= sum;
                        }
                    }
                    std::copy(data.begin(), data.end(), final.begin() + dim * n);
                }
                insert_cols(final);
                break;
            }
            case engine::DataType::VECTOR_BINARY: {
                auto dim = field.get_dim();
                Assert(dim % 8 == 0);
                vector<uint8_t> data(dim / 8 * N);
                for (auto& x : data) {
                    x = er();
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::INT64: {
                vector<int64_t> data(N);
                // begin with counter
                if (starts_with(field.get_name().get(), "counter")) {
                    int64_t index = 0;
                    for (auto& x : data) {
                        x = index++;
                    }
                } else {
                    for (auto& x : data) {
                        x = er() % (2 * N);
                    }
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::INT32: {
                vector<int> data(N);
                for (auto& x : data) {
                    x = er() % (2 * N);
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::FLOAT: {
                vector<float> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::DOUBLE: {
                vector<double> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data);
                break;
            }
            default: {
                throw std::runtime_error("unimplemented");
            }
        }
        ++offset;
    }
    GeneratedData res;
    res.cols_ = std::move(cols);
    for (int i = 0; i < N; ++i) {
        res.row_ids_.push_back(i);
        res.timestamps_.push_back(i);
    }
    std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er);
    res.generate_rows(N, schema);
    // FIX: return by value so NRVO / implicit move applies;
    // `return std::move(res)` pessimized copy elision.
    return res;
}
// Builds a PlaceholderGroup with `num_queries` random N(0,1) float vectors
// of dimension `dim`, tagged "$0".
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    std::normal_distribution<double> gauss(0, 1);
    std::default_random_engine rng(seed);
    for (int q = 0; q < num_queries; ++q) {
        std::vector<float> vec;
        vec.reserve(dim);
        for (int d = 0; d < dim; ++d) {
            vec.push_back(gauss(rng));
        }
        placeholder->add_values(vec.data(), vec.size() * sizeof(float));
    }
    return raw_group;
}
// Builds a PlaceholderGroup of `num_queries` float vectors of dimension
// `dim`, copied from the contiguous blob `src`, tagged "$0".
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    for (int q = 0; q < num_queries; ++q) {
        // each query consumes the next `dim` consecutive floats
        std::vector<float> vec(src, src + dim);
        src += dim;
        placeholder->add_values(vec.data(), vec.size() * sizeof(float));
    }
    return raw_group;
}
// Builds a PlaceholderGroup with `num_queries` random binary vectors of
// `dim` bits (dim must be a multiple of 8), tagged "$0".
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    std::default_random_engine rng(seed);
    const int64_t bytes_per_vec = dim / 8;
    for (int q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> vec;
        vec.reserve(bytes_per_vec);
        for (int64_t b = 0; b < bytes_per_vec; ++b) {
            vec.push_back(rng());
        }
        placeholder->add_values(vec.data(), vec.size());
    }
    return raw_group;
}
// Builds a PlaceholderGroup of `num_queries` binary vectors of `dim` bits
// (dim must be a multiple of 8), copied from the contiguous blob `ptr`.
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    const int64_t bytes_per_vec = dim / 8;
    for (int q = 0; q < num_queries; ++q) {
        // each query consumes the next dim/8 consecutive bytes
        std::vector<uint8_t> vec(ptr, ptr + bytes_per_vec);
        ptr += bytes_per_vec;
        placeholder->add_values(vec.data(), vec.size());
    }
    return raw_group;
}
// Renders search results as nested JSON: for every query, a list of
// "<segment offset>-><distance>" strings, one per top-k hit.
inline json
QueryResultToJson(const QueryResult& qr) {
    std::vector<std::vector<std::string>> rows;
    for (int64_t q = 0; q < qr.num_queries_; ++q) {
        std::vector<std::string> hits;
        for (int64_t k = 0; k < qr.topK_; ++k) {
            const int64_t idx = q * qr.topK_ + k;
            hits.emplace_back(std::to_string(qr.internal_seg_offsets_[idx]) + "->" +
                              std::to_string(qr.result_distances_[idx]));
        }
        rows.emplace_back(std::move(hits));
    }
    return json{rows};
}
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
// TODO
auto row_count = dataset.row_ids_.size();
{
LoadFieldDataInfo info;
info.blob = dataset.row_ids_.data();
info.row_count = dataset.row_ids_.size();
info.field_id = 0; // field id for RowId
seg.LoadFieldData(info);
}
{
LoadFieldDataInfo info;
info.blob = dataset.timestamps_.data();
info.row_count = dataset.timestamps_.size();
info.field_id = 1;
seg.LoadFieldData(info);
}
int field_offset = 0;
for (auto& meta : seg.get_schema().get_fields()) {
LoadFieldDataInfo info;
info.field_id = meta.get_id().get();
info.row_count = row_count;
info.blob = dataset.cols_[field_offset].data();
seg.LoadFieldData(info);
++field_offset;
}
}
// Trains an IVF index (L2 metric, nlist=1024, device 0) over N vectors of
// dimension `dim` and adds them without explicit ids.
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
    // {knowhere::IndexParams::nprobe, 10},
    auto config = knowhere::Config{{knowhere::meta::DIM, dim},
                                   {knowhere::IndexParams::nlist, 1024},
                                   {knowhere::Metric::TYPE, milvus::knowhere::Metric::L2},
                                   {knowhere::meta::DEVICEID, 0}};
    auto dataset = knowhere::GenDataset(N, dim, vec);
    auto index = std::make_shared<knowhere::IVF>();
    index->Train(dataset, config);
    index->AddWithoutIds(dataset, config);
    return index;
}
} // namespace milvus::segcore
|
hmTriDistance.c | #include "hmTriDistance.h"
#include "hmVec3.h"
#include "hmUtility.h"
#include "hmConstants.h"
#include "hmVectorSizeT.h"
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/time.h>
#include<OpenCL/opencl.h>
/* Puts a freshly-declared hmTriDistance into a well-defined empty state:
 * no surface, zero-sized matrices and factors, and empty timing logs.
 * Should be called once before hmTriDistanceBuild(). */
void hmTriDistanceInitialize( hmTriDistance* distance )
{
   distance->surface = NULL;
   hmDenseMatrixInitialize( &distance->isSource, 0, 0 );
   hmDenseMatrixInitialize( &distance->distance, 0, 0 );
   hmDenseMatrixInitialize( &distance->heatNeumann, 0, 0 );
   hmDenseMatrixInitialize( &distance->heatDirichlet, 0, 0 );
   hmDenseMatrixInitialize( &distance->potential, 0, 0 );
   hmSparseMatrixInitialize( &distance->laplacian, 0, 0, 0 );
   hmSparseMatrixInitialize( &distance->heatFlowNeumann, 0, 0, 0 );
   hmSparseMatrixInitialize( &distance->heatFlowDirichlet, 0, 0, 0 );
   hmCholeskyFactorInitialize( &distance->laplacianFactor );
   hmCholeskyFactorInitialize( &distance->heatFlowNeumannFactor );
   hmCholeskyFactorInitialize( &distance->heatFlowDirichletFactor );
   /* by default, use pure Neumann boundary conditions (0 = Neumann, 1 = Dirichlet) */
   distance->boundaryConditions = 0.;
   hmVectorDoubleInitialize( &distance->startTimesProcessor );
   hmVectorDoubleInitialize( &distance->startTimesWallClock );
}
/* Releases every matrix, factor, and timing vector owned by `distance`.
 * Does NOT free the surface -- hmTriDistance does not own it. */
void hmTriDistanceDestroy( hmTriDistance* distance )
{
   hmDenseMatrixDestroy( &distance->isSource );
   hmDenseMatrixDestroy( &distance->distance );
   hmDenseMatrixDestroy( &distance->heatNeumann );
   hmDenseMatrixDestroy( &distance->heatDirichlet );
   hmDenseMatrixDestroy( &distance->potential );
   hmSparseMatrixDestroy( &distance->laplacian );
   hmSparseMatrixDestroy( &distance->heatFlowNeumann );
   hmSparseMatrixDestroy( &distance->heatFlowDirichlet );
   hmCholeskyFactorDestroy( &distance->laplacianFactor );
   hmCholeskyFactorDestroy( &distance->heatFlowNeumannFactor );
   hmCholeskyFactorDestroy( &distance->heatFlowDirichletFactor );
   hmVectorDoubleDestroy( &distance->startTimesProcessor );
   hmVectorDoubleDestroy( &distance->startTimesWallClock );
}
/* Sets distance->time to the square of the mean edge length of the
 * surface -- the usual heuristic time step t = h^2 for the heat flow.
 * Every triangle contributes all three of its edges (so interior edges
 * are counted twice, consistently). */
void hmTriDistanceEstimateTime( hmTriDistance* distance )
{
   size_t i;
   size_t nFaces = distance->surface->nFaces;
   size_t* faces = distance->surface->faces;
   double* vertices = distance->surface->vertices;
   double *p0, *p1, *p2;
   hmVec3 e01, e12, e20;
   double edgeSum = 0.;
   double edgeCount = 0.;

   /* accumulate the three edge lengths of every face */
   for( i = 0; i < nFaces; i++ )
   {
      size_t* f = &faces[ 3*i ];

      p0 = &vertices[ f[0]*3 ];
      p1 = &vertices[ f[1]*3 ];
      p2 = &vertices[ f[2]*3 ];

      hmVec3Sub( e01, p1, p0 );
      hmVec3Sub( e12, p2, p1 );
      hmVec3Sub( e20, p0, p2 );

      edgeSum += hmVec3Norm( e01 ) + hmVec3Norm( e12 ) + hmVec3Norm( e20 );
      edgeCount += 3.;
   }

   /* t = (mean edge length)^2 */
   distance->time = hmSquare( edgeSum / edgeCount );
}
/* Sets the boundary-condition blend factor: 0 selects pure Neumann,
 * 1 pure Dirichlet, and values in between blend the two solutions.
 * Must be set before hmTriDistanceBuild() so the right matrices are
 * allocated and factored. */
void hmTriDistanceSetBoundaryConditions( hmTriDistance* distance,
                                         double boundaryConditions )
{
   distance->boundaryConditions = boundaryConditions;
}
/* Allocates the per-vertex matrices, assembles the Laplacian and
 * heat-flow operators, and prefactors them. Requires that a surface has
 * been attached; exits with an error message otherwise.
 *
 * FIX: the original read distance->surface->nVertices BEFORE checking
 * distance->surface for NULL, so the guard could never fire -- the
 * dereference happens only after the check now. */
void hmTriDistanceBuild( hmTriDistance* distance )
{
   size_t nVertices;

   if( distance->surface == NULL )
   {
      fprintf( stderr, "Error: hmTriDistanceBuild -- must specify a surface!\n" );
      exit( 1 );
   }

   /* safe to dereference only after the NULL check above */
   nVertices = distance->surface->nVertices;

   hmTriDistanceDestroy( distance );
   hmVectorDoubleInitialize( &distance->startTimesProcessor );
   hmVectorDoubleInitialize( &distance->startTimesWallClock );

   hmDenseMatrixInitialize( &distance->isSource, nVertices, 1 );
   hmDenseMatrixInitialize( &distance->distance, nVertices, 1 );
   hmDenseMatrixInitialize( &distance->potential, nVertices, 1 );

   /* only allocate space for both solutions if necessary */
   if( distance->boundaryConditions < 1. ) /* partial Neumann */
   {
      hmDenseMatrixInitialize( &distance->heatNeumann, nVertices, 1 );
   }
   if( distance->boundaryConditions > 0. ) /* partial Dirichlet */
   {
      hmDenseMatrixInitialize( &distance->heatDirichlet, nVertices, 1 );
   }

   hmTriDistanceStartTiming( distance );
   hmTriDistanceBuildMatrices( distance );
   hmTriDistanceStopTiming( distance, "Matrix build time" );

   hmTriDistanceFactorMatrices( distance );
}
/* Recomputes the distance field for the current sources: one heat-flow
 * solve, then a fixed number of potential/Poisson passes.
 * NOTE(review): the 15 iterations are hard-coded, and each pass feeds the
 * previous distance solution back in as the "heat" input -- presumably an
 * iterative refinement of the heat method; confirm the intended count. */
void hmTriDistanceUpdate( hmTriDistance* distance )
{
   size_t i;
   hmTriDistanceSolveHeatEquation( distance );
   for(i=0;i<15;i++){
      hmTriDistanceComputePotential( distance );
      hmTriDistanceSolvePoissonEquation( distance );
      /* reuse the freshly computed distances as the next pass's input */
      distance->heat = distance->distance.values;
   }
}
/* Integrates heat flow for one time step by backsolving against the
 * prefactored heat-flow operators, then publishes the result in
 * distance->heat. For blended boundary conditions (0 < BC < 1) the
 * Neumann and Dirichlet solutions are combined on the GPU:
 *    out[i] = (1-BC)*neumann[i] + BC*dirichlet[i]
 *
 * Fixes relative to the original:
 *  - kernel source lacked the mandatory `void` return type, so
 *    clBuildProgram could not succeed;
 *  - clGetKernelWorkGroupInfo wrote a size_t through sizeof(int);
 *  - the kernel's `count` argument was fed a 64-bit size_t while the
 *    kernel declares unsigned int;
 *  - the global work size was not rounded up to a multiple of the local
 *    work size (the kernel's `i < count` guard makes the padding safe);
 *  - OpenCL objects were created on every call but released only on the
 *    blended path (leak); creation/teardown now live entirely inside
 *    the branch that uses them;
 *  - the blended result was never published to distance->heat. */
void hmTriDistanceSolveHeatEquation( hmTriDistance* distance )
{
   size_t nVertices = distance->surface->nVertices;
   const double BC = distance->boundaryConditions;

   /* only compute both solutions if necessary */
   if( BC < 1. ) /* partial Neumann */
   {
      hmCholeskyFactorBacksolve( &distance->heatFlowNeumannFactor,
                                 &distance->heatNeumann,
                                 &distance->isSource );
   }
   if( BC > 0. ) /* partial Dirichlet */
   {
      hmCholeskyFactorBacksolve( &distance->heatFlowDirichletFactor,
                                 &distance->heatDirichlet,
                                 &distance->isSource );
   }

   /* store the final solution in hmTriDistance::heat,
    * combining the two solutions if necessary */
   if( BC > 0. && BC < 1. )
   {
      int err;
      cl_program program;
      cl_kernel kernel;
      cl_context context;
      cl_command_queue queue;
      cl_device_id device_id;
      int gpu = 1;
      cl_mem input1;
      cl_mem input2;
      cl_mem output;
      size_t global;
      size_t local;
      unsigned int count = (unsigned int) nVertices; /* kernel takes 32-bit count */
      const char *KernelSource = "\n" \
      "__kernel void HeatEquation(                \n" \
      "   __global double*input1,                 \n" \
      "   __global double*input2,                 \n" \
      "   __global double*output,                 \n" \
      "   const double BC,                        \n" \
      "   const unsigned int count)               \n" \
      "{                                          \n" \
      "   int i = get_global_id(0);               \n" \
      "   if(i < count){                          \n" \
      "      output[i]=(1-BC)*input1[i]+BC*input2[i]; \n" \
      "}                                          \n" \
      "}                                          \n" \
      "\n";

      err = clGetDeviceIDs(NULL, gpu?CL_DEVICE_TYPE_GPU:CL_DEVICE_TYPE_CPU, 1, &device_id, NULL);
      context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
      queue = clCreateCommandQueue(context, device_id, 0, &err);
      program = clCreateProgramWithSource(context, 1, (const char **)&KernelSource, NULL, &err);
      err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
      kernel = clCreateKernel(program, "HeatEquation", &err);

      input1 = clCreateBuffer(context, CL_MEM_READ_ONLY, nVertices * sizeof(double), NULL, NULL);
      input2 = clCreateBuffer(context, CL_MEM_READ_ONLY, nVertices * sizeof(double), NULL, NULL);
      output = clCreateBuffer(context, CL_MEM_WRITE_ONLY, nVertices * sizeof(double), NULL, NULL);
      err = clEnqueueWriteBuffer(queue, input1, CL_TRUE, 0, nVertices * sizeof(double), distance->heatNeumann.values, 0, NULL, NULL);
      err = clEnqueueWriteBuffer(queue, input2, CL_TRUE, 0, nVertices * sizeof(double), distance->heatDirichlet.values, 0, NULL, NULL);
      err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &input1);
      err = clSetKernelArg(kernel, 1, sizeof(cl_mem), &input2);
      err = clSetKernelArg(kernel, 2, sizeof(cl_mem), &output);
      err = clSetKernelArg(kernel, 3, sizeof(double), &BC);
      err = clSetKernelArg(kernel, 4, sizeof(unsigned int), &count);
      err = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE, sizeof(local), &local, NULL);
      /* round global size up to a multiple of the work-group size */
      global = ((nVertices + local - 1) / local) * local;
      err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, NULL);
      clFinish(queue);
      /* blended result overwrites the Neumann buffer */
      err = clEnqueueReadBuffer(queue, output, CL_TRUE, 0, nVertices * sizeof(double), distance->heatNeumann.values, 0, NULL, NULL);
      distance->heat = distance->heatNeumann.values;

      clReleaseMemObject(input1);
      clReleaseMemObject(input2);
      clReleaseMemObject(output);
      clReleaseProgram(program);
      clReleaseKernel(kernel);
      clReleaseCommandQueue(queue);
      clReleaseContext(context);
   }
   else if( BC == 0. ) /* pure Neumann */
   {
      distance->heat = distance->heatNeumann.values;
   }
   else /* pure Dirichlet */
   {
      distance->heat = distance->heatDirichlet.values;
   }
}
/* Computes the right-hand side of the Poisson equation: the (negated)
 * divergence of the normalized heat gradient, accumulated per vertex into
 * distance->potential, then mean-centered so it lies in the range of the
 * Laplacian. Reads distance->heat, which must already be populated. */
void hmTriDistanceComputePotential( hmTriDistance* distance )
{
   /* array counters */
   size_t i;
   /* local data handles */
   int nFaces = distance->surface->nFaces;
   int nVertices = distance->surface->nVertices;
   const size_t* f = distance->surface->faces;
   const double* w = distance->surface->weights; /* advanced each face but not read here */
   const double* heat = distance->heat;
   double* potential = distance->potential.values;
   /* current triangle data */
   double u0, u1, u2; /* heat values */
   double rMag; /* reciprocal of magnitude */
   double *t0, *t1, *t2; /* edge normals */
   double *e0, *e1, *e2; /* cotan-weighted edge vectors */
   hmVec3 X; /* normalized gradient */
   double e0DotX, e1DotX, e2DotX;
   /* NOTE(review): regularization constant in the gradient normalization
    * below -- origin/meaning not documented here; confirm. */
   double r=0.0037;
   /* initialize potential to zero */
   hmClearArrayDouble( potential, distance->surface->nVertices, 0. );
   /* get pointers to first three edge normals */
   t0 = &distance->surface->edgeNormals[0];
   t1 = &distance->surface->edgeNormals[3];
   t2 = &distance->surface->edgeNormals[6];
   /* get pointers to first three weighted edges */
   e0 = &distance->surface->weightedEdges[0];
   e1 = &distance->surface->weightedEdges[3];
   e2 = &distance->surface->weightedEdges[6];
   /* add contribution from each face */
   for( i = 0; i < nFaces; i++ )
   {
      /* get heat values at three vertices */
      u0 = fabs( heat[ f[0] ] );
      u1 = fabs( heat[ f[1] ] );
      u2 = fabs( heat[ f[2] ] );
      /* normalize heat values so that they have roughly unit magnitude */
      rMag = 1./hmMaxDouble( hmMaxDouble( u0, u1 ), u2 );
      /* skip faces where all three heat values are zero (rMag = inf) */
      if( !isinf(rMag) )
      {
         u0 *= rMag;
         u1 *= rMag;
         u2 *= rMag;
         /* compute normalized gradient */
         X[0] = u0*t0[0] + u1*t1[0] + u2*t2[0];
         X[1] = u0*t0[1] + u1*t1[1] + u2*t2[1];
         X[2] = u0*t0[2] + u1*t1[2] + u2*t2[2];
         /* regularized normalization of X (exact unit length when r = 0) */
         hmVec3Scale(X,(2+r*hmVec3Norm(X))/((2+r)*hmVec3Norm(X)));
         /* add contribution to divergence */
         e0DotX = hmVec3Dot( e0, X );
         e1DotX = hmVec3Dot( e1, X );
         e2DotX = hmVec3Dot( e2, X );
         potential[ f[0] ] -= e1DotX - e2DotX;
         potential[ f[1] ] -= e2DotX - e0DotX;
         potential[ f[2] ] -= e0DotX - e1DotX;
         /* hard-fail with diagnostics if anything went non-finite */
         if( isnan( potential[f[0]] ) ||
             isnan( potential[f[1]] ) ||
             isnan( potential[f[2]] ) )
         {
            fprintf( stderr, "NaN\n============\n" );
            fprintf( stderr, "heat: %e %e %e\n", heat[f[0]], heat[f[1]], heat[f[2]] );
            fprintf( stderr, " mag: %e\n", hmMaxDouble( hmMaxDouble( u0, u1 ), u2 ));
            fprintf( stderr, "rMag: %e\n", rMag );
            fprintf( stderr, " u: %e %e %e\n", u0, u1, u2 );
            fprintf( stderr, " X: %e %e %e\n", X[0], X[1], X[2] );
            fprintf( stderr, "ei*X: %e %e %e\n", e0DotX, e1DotX, e2DotX );
            exit( 1 );
         }
      }
      /* move to next face (per-face data is striped: 3 indices/weights,
       * 9 doubles of edge normals and weighted edges) */
      f += 3;
      w += 3;
      t0 += 9; t1 += 9; t2 += 9;
      e0 += 9; e1 += 9; e2 += 9;
   }
   /* remove mean value so that the potential is
    * in the range of the Laplace operator */
   hmRemoveMean( potential, nVertices );
}
/* Solves L*phi = potential via the prefactored Laplacian, then shifts the
 * solution so its minimum is zero (phi is defined only up to a constant). */
void hmTriDistanceSolvePoissonEquation( hmTriDistance* distance )
{
   size_t i;
   const size_t nVertices = distance->surface->nVertices;
   double shift = DBL_MAX;
   double* phi;

   hmCholeskyFactorBacksolve( &distance->laplacianFactor,
                              &distance->distance,
                              &distance->potential );

   phi = distance->distance.values;

   /* locate the minimum (serial: this is a reduction) */
   for( i = 0; i < nVertices; i++ )
   {
      shift = hmMinDouble( shift, phi[i] );
   }

   /* subtract it from every entry */
   #pragma omp parallel for
   for( i = 0; i < nVertices; i++ )
   {
      phi[i] -= shift;
   }
}
void hmTriDistanceBuildMatrices( hmTriDistance* distance )
{
size_t i,j;
size_t nz; /* current nonzero */
size_t lastNeighborIndex;
size_t count; /* number of times a given neighbor appears */
size_t* columnStart;
char* onBoundary;
double* columnSum;
double* x;
size_t* n;
const hmPairSizeTDouble *neighborsBegin, *neighborsEnd, *currentNeighbor;
const double boundaryConditions = distance->boundaryConditions;
const double time = distance->time;
double A; /* vertex area */
hmSparseMatrix* laplacian = &distance->laplacian;
hmSparseMatrix* heatFlowNeumann = &distance->heatFlowNeumann;
hmSparseMatrix* heatFlowDirichlet = &distance->heatFlowDirichlet;
hmTriMesh* mesh = distance->surface;
/* array counters */
int k,iter;
int j0, j1, j2;
/* local data handles */
size_t nFaces = mesh->nFaces;
size_t nVertices = mesh->nVertices;
const size_t* f;
double* w; /* current weights */
double* vertices = mesh->vertices;
double* vertexAreas;
/* current triangle data */
double* p[3]; /* vertex positions */
double *e[3]; /* edge vectors */
double *t[3]; /* rotated edge vectors */
hmVectorPairSizeTDouble *neighbors; /* temporary, redundant list of vertex neighbors */
hmVectorPairSizeTDouble *uniqueNeighbors; /* final list of unique vertex neighbors */
hmPairSizeTDouble neighbor; /* used to construct a record of the current neighbor */
hmVec3 u, v; /* edge vectors */
hmVec3 N; /* triangle normal */
double uvSinTheta, uvCosTheta;
hmDestroy( mesh->weights );
mesh->weights = malloc( 3*nFaces * sizeof(double) );
/* allocate storage for edge data */
hmDestroy( mesh->edgeNormals );
hmDestroy( mesh->weightedEdges );
mesh->edgeNormals = malloc( 9*nFaces * sizeof( double ));
mesh->weightedEdges = malloc( 9*nFaces * sizeof( double ));
/* initialize vertex areas to zero */
hmDestroy( mesh->vertexAreas );
mesh->vertexAreas = malloc( nVertices * sizeof( double ));
hmClearArrayDouble( mesh->vertexAreas, nVertices, 0. );
vertexAreas = mesh->vertexAreas;
/* allocate a list of redundant neighbors for each vertex */
neighbors = malloc( nVertices * sizeof( hmVectorPairSizeTDouble ));
hmDestroy( mesh->vertexNeighbors );
mesh->vertexNeighbors = malloc( nVertices * sizeof(hmVectorPairSizeTDouble) );
uniqueNeighbors = mesh->vertexNeighbors; /* short name */
columnStart=calloc((nVertices+1),sizeof(size_t));
columnSum=calloc( nVertices,sizeof(double));
#pragma omp parallel for
for( i = 0; i < nVertices; i++ )
{
hmVectorPairSizeTDoubleInitialize( &neighbors[i] );
hmVectorPairSizeTDoubleInitialize( &uniqueNeighbors[i] );
}
/* allocate an array of flags for boundary vertices */
hmDestroy( mesh->onBoundary );
mesh->onBoundary = malloc( nVertices * sizeof(char) );
/* iterate over triangles */
for(iter=0; iter < nFaces; iter++)
{
/* get vertex coordinates */
f=mesh->faces+3*iter;
w=mesh->weights+3*iter;
p[0] = &vertices[ f[0]*3 ];
p[1] = &vertices[ f[1]*3 ];
p[2] = &vertices[ f[2]*3 ];
/* iterate over triangle corners */
for( k = 0; k < 3; k++ )
{
/* get outgoing edge vectors u, v at current corner */
size_t v_index=f[k];
t[k]=mesh->edgeNormals+9*iter+3*k;
e[k]=mesh->weightedEdges+9*iter+3*k;
j0 = (0+k) % 3;
j1 = (1+k) % 3;
j2 = (2+k) % 3;
hmVec3Sub( u, p[j1], p[j0] );
hmVec3Sub( v, p[j2], p[j0] );
hmVec3Sub( e[k], p[j2], p[j1] );
/* compute (one-half of) the cotangent weight */
hmVec3Cross( N, u, v );
hmVec3Cross( t[k], N, e[k] );
uvSinTheta = hmVec3Norm( N );
uvCosTheta = hmVec3Dot( u, v );
vertexAreas[ v_index ] += uvSinTheta/6;
w[k] = .5 * uvCosTheta / uvSinTheta;
hmVec3Scale( e[k], w[k] );
}
for( k = 0; k < 3; k++ ){
j1 = (1+k) % 3;
j2 = (2+k) % 3;
neighbor.n = f[j1]; neighbor.x = w[j2]; hmVectorPairSizeTDoublePushBack( &neighbors[ f[k] ], neighbor );
neighbor.n = f[j2]; neighbor.x = w[j1]; hmVectorPairSizeTDoublePushBack( &neighbors[ f[k] ], neighbor );}
}
/* iterate over vertices */
#pragma omp parallel for private(lastNeighborIndex,count,j)
for( i = 0; i < nVertices; i++ )
{
/* sort neighbor list by index */
hmVectorPairSizeTDoubleSort( &neighbors[i] );
/* initially flag as an interior vertex */
mesh->onBoundary[i] = 0;
/* extract unique elements from neighbor list, summing weights */
hmVectorPairSizeTDoubleResize( &uniqueNeighbors[i], 0 );
lastNeighborIndex = -1;
count = 0;
for( j = 0; j < neighbors[i].size; j++ )
{
/* if we come across a new neighbor, add it to the list of unique neighbors */
if( neighbors[i].entries[j].n != lastNeighborIndex )
{
/* if we encountered the previous neighbor only
* once, this vertex must be on the surface boundary */
if( count == 1 )
{
mesh->onBoundary[i] = 1;
}
count = 1;
if(neighbors[i].entries[j].n>i){
hmVectorPairSizeTDoublePushBack( &uniqueNeighbors[i], neighbors[i].entries[j] );
columnStart[i+1]++;
}
lastNeighborIndex = neighbors[i].entries[j].n;
}
else
{
/* since we've seen this neighbor before, just accumulate its weight */
if(neighbors[i].entries[j].n>i)
uniqueNeighbors[i].entries[ uniqueNeighbors[i].size-1 ].x += neighbors[i].entries[j].x;
count++;
}
columnSum[i]+=neighbors[i].entries[j].x;
}
/* if the final neighbor was encountered only once, this is a boundary vertex */
if( count == 1 )
{
mesh->onBoundary[i] = 1;
}
hmVectorPairSizeTDoubleDestroy( &neighbors[i] );
}
free( neighbors );
/* determine the starting entry of nonzeros in
* each column, keeping the lower triangle only */
for( i = 1; i < (nVertices+1); ++i)
{
columnStart[i]+=columnStart[i-1];
}
x=malloc( columnStart[nVertices] * sizeof(double));
n=malloc( columnStart[nVertices] * sizeof(size_t));
onBoundary = distance->surface->onBoundary;
/* initialize matrices and copy column start pointers */
hmSparseMatrixDestroy( laplacian );
hmSparseMatrixInitialize( laplacian, nVertices, nVertices, (columnStart[nVertices]+nVertices) );
if( boundaryConditions < 1. ) /* partial Neumann */
{
hmSparseMatrixDestroy( heatFlowNeumann );
hmSparseMatrixInitialize( heatFlowNeumann, nVertices, nVertices, (columnStart[nVertices]+nVertices) );}
if( boundaryConditions > 0. ) /* partial Dirichlet */
{
hmSparseMatrixDestroy( heatFlowDirichlet );
hmSparseMatrixInitialize( heatFlowDirichlet, nVertices, nVertices, (columnStart[nVertices]+nVertices) );}
for( i = 0; i < nVertices+1; i++ )
{
distance->laplacian.columnStart[i] = columnStart[i]+i;
if( boundaryConditions < 1. ) /* partial Neumann */
distance->heatFlowNeumann.columnStart[i] = columnStart[i]+i;
if( boundaryConditions > 0. )/* partial Dirichlet */
distance->heatFlowDirichlet.columnStart[i] = columnStart[i]+i;
neighborsBegin = uniqueNeighbors[i].entries;
neighborsEnd = neighborsBegin + uniqueNeighbors[i].size;
for( currentNeighbor = neighborsBegin;
currentNeighbor != neighborsEnd;
currentNeighbor ++ )
{
x[columnStart[i]+currentNeighbor-neighborsBegin]=currentNeighbor->x;
n[columnStart[i]+currentNeighbor-neighborsBegin]=currentNeighbor->n;
}
}
/* fill nonzero entries */
for( i = 0; i < nVertices; i++ )
{
/* set diagonal entry of Laplacian, adding a small
regularization term in order to get strict
positive-definiteness (needed for CHOLMOD) */
laplacian->values[ columnStart[i]+i] = columnSum[i] + hmRegularization;
laplacian->rowIndices[columnStart[i]+i] = i;
A = distance->surface->vertexAreas[ i ];
if( boundaryConditions < 1. ) /* partial Neumann */
{
heatFlowNeumann->values[columnStart[i]+i] = A + time*columnSum[i];
heatFlowNeumann->rowIndices[columnStart[i]+i] = i;
}
if( boundaryConditions > 0. ) /* partial Dirichlet */
{
if( onBoundary[ i ] )
{
/* use the identity (times the mass matrix) for boundary
* rows/columns to enforce zero-Dirichlet conditions */
heatFlowDirichlet->values[columnStart[i]+i] = A;
}
else
{
heatFlowDirichlet->values[columnStart[i]+i] = A + time*columnSum[i];
}
heatFlowDirichlet->rowIndices[columnStart[i]+i] = i;
}
/* set off-diagonal entries below the diagonal */
for(nz=columnStart[i]+i+1;nz<columnStart[i+1]+i+1;nz++)
{
laplacian->values[ nz ] = -x[nz-i-1];
laplacian->rowIndices[ nz ] = n[nz-i-1];
if( boundaryConditions < 1. ) /* partial Neumann */
{
heatFlowNeumann->values[ nz ] = -time*x[nz-i-1];
heatFlowNeumann->rowIndices[ nz ] = n[nz-i-1];
}
if( boundaryConditions > 0. ) /* partial Dirichlet */
{
if( onBoundary[i] || onBoundary[n[nz-i-1]] )
{
/* set off-diagonals to zero so that we retain
* the same sparsity pattern as other matrices */
heatFlowDirichlet->values[ nz ] = 0.;
}
else
{
heatFlowDirichlet->values[ nz ] = -time*x[nz-i-1];
}
heatFlowDirichlet->rowIndices[ nz ] = n[nz-i-1];
}
}
}
}
/* Prefactors the three operators used by the heat method: the Laplacian and
 * (depending on distance->boundaryConditions) the Neumann and/or Dirichlet
 * heat-flow operators.  The Laplacian's symbolic factorization is reused for
 * the heat-flow operators via hmCholeskyFactorCopy, since all three matrices
 * share the same sparsity pattern; only the numerical phase is redone. */
void hmTriDistanceFactorMatrices( hmTriDistance* distance )
{
   /* Laplacian: full reorder + symbolic + numerical factorization */
   hmCholeskyFactorDestroy   ( &distance->laplacianFactor );
   hmCholeskyFactorInitialize( &distance->laplacianFactor );
   hmCholeskyFactorReorder   ( &distance->laplacianFactor, &distance->laplacian );
   hmCholeskyFactorSymbolic  ( &distance->laplacianFactor, &distance->laplacian );
   hmCholeskyFactorNumerical ( &distance->laplacianFactor, &distance->laplacian );

   /* only factor both heat flow operators if necessary */
   /* (note that the symbolic factorization for Laplace can be reused in both
    * cases since all three matrices have the same sparsity pattern) */
   if( distance->boundaryConditions < 1. ) /* partial Neumann */
   {
      hmCholeskyFactorDestroy   ( &distance->heatFlowNeumannFactor );
      hmCholeskyFactorInitialize( &distance->heatFlowNeumannFactor );
      hmCholeskyFactorCopy      ( &distance->heatFlowNeumannFactor, &distance->laplacianFactor );
#ifdef HM_USE_HSLMA87 /* currently no way to copy symbolic factorization in HSL_MA87... */
      hmCholeskyFactorSymbolic  ( &distance->heatFlowNeumannFactor, &distance->heatFlowNeumann );
#endif
      hmCholeskyFactorNumerical ( &distance->heatFlowNeumannFactor, &distance->heatFlowNeumann );
   }
   if( distance->boundaryConditions > 0. ) /* partial Dirichlet */
   {
      hmCholeskyFactorDestroy   ( &distance->heatFlowDirichletFactor );
      hmCholeskyFactorInitialize( &distance->heatFlowDirichletFactor );
      hmCholeskyFactorCopy      ( &distance->heatFlowDirichletFactor, &distance->laplacianFactor );
      /* FIX: this guard previously read `#ifdef HM_USE_HSLMA87 / #else /
       * symbolic / #endif`, i.e. the exact opposite of the Neumann branch
       * above.  Per the comment on the Neumann path, HSL_MA87 cannot copy a
       * symbolic factorization, so the symbolic phase must be redone *when*
       * HM_USE_HSLMA87 is defined (and is redundant otherwise).  Made the two
       * branches consistent.  (Assumes hmCholeskyFactorCopy transfers the
       * symbolic factorization for non-MA87 backends, as the Neumann path
       * already relies on — confirm against the backend implementation.) */
#ifdef HM_USE_HSLMA87
      hmCholeskyFactorSymbolic  ( &distance->heatFlowDirichletFactor, &distance->heatFlowDirichlet );
#endif
      hmCholeskyFactorNumerical ( &distance->heatFlowDirichletFactor, &distance->heatFlowDirichlet );
   }
}
/* Changes the heat-flow time parameter t and refreshes everything that
 * depends on it: rebuilds the system matrices, then redoes only the
 * *numerical* Cholesky phase for each factor (the reordering and symbolic
 * analysis are reused, since the sparsity pattern does not depend on t). */
void hmTriDistanceUpdateTime( hmTriDistance* distance, double time )
{
   hmTriDistanceStartTiming( distance );
   distance->time = time;
   /* time enters the heat-flow matrices (A + t*L terms), so they must be rebuilt */
   hmTriDistanceBuildMatrices( distance );
   hmCholeskyFactorNumerical( &distance->laplacianFactor, &distance->laplacian );
   if( distance->boundaryConditions < 1. ) /* partial Neumann */
   {
      hmCholeskyFactorNumerical( &distance->heatFlowNeumannFactor, &distance->heatFlowNeumann );
   }
   if( distance->boundaryConditions > 0. ) /* partial Dirichlet */
   {
      hmCholeskyFactorNumerical( &distance->heatFlowDirichletFactor, &distance->heatFlowDirichlet );
   }
   hmTriDistanceStopTiming( distance, "Update t parameter" );
}
/* Pushes a new timing record (processor time and wall-clock time) onto the
 * per-distance timer stacks.  A matching hmTriDistanceStopTiming() call pops
 * them and prints the elapsed interval.  No-op unless verbose mode is on. */
void hmTriDistanceStartTiming( hmTriDistance* distance )
{
   struct timeval wallTime;
   struct timezone zone;
   double cpuSeconds;
   double wallSeconds;

   /* timing is collected only in verbose mode */
   if( !distance->verbose )
   {
      return;
   }

   /* record CPU time first, then wall-clock time, mirroring the pop
    * order used by hmTriDistanceStopTiming() */
   cpuSeconds = (double) clock() / (double) CLOCKS_PER_SEC;
   hmVectorDoublePushBack( &distance->startTimesProcessor, cpuSeconds );

   gettimeofday( &wallTime, &zone );
   wallSeconds = (double) wallTime.tv_sec + 1e-6*(double) wallTime.tv_usec;
   hmVectorDoublePushBack( &distance->startTimesWallClock, wallSeconds );
}
/* Pops the most recent timing record pushed by hmTriDistanceStartTiming()
 * and prints the elapsed processor and wall-clock time under the given
 * label, indented by one tab per still-outstanding (nested) timer.
 * No-op unless verbose mode is on. */
void hmTriDistanceStopTiming( hmTriDistance* distance,
                              const char* label )
{
   int tab;
   struct timeval wallTime;
   struct timezone zone;
   double cpuStart, cpuStop;
   double wallStart, wallStop;
   /* nesting depth = timers still on the stack, excluding the one we pop */
   int depth = distance->startTimesProcessor.size-1;

   if( !distance->verbose )
   {
      return;
   }

   cpuStart = hmVectorDoublePopBack( &distance->startTimesProcessor );
   cpuStop  = (double) clock() / (double) CLOCKS_PER_SEC;

   gettimeofday( &wallTime, &zone );
   wallStart = hmVectorDoublePopBack( &distance->startTimesWallClock );
   wallStop  = (double) wallTime.tv_sec + 1e-6*(double) wallTime.tv_usec;

   for( tab = 0; tab < depth; tab++ ) printf( "\t" );
   printf( "%s\n", label );
   for( tab = 0; tab < depth; tab++ ) printf( "\t" );
   printf( "--------------------------------------------\n" );
   for( tab = 0; tab < depth; tab++ ) printf( "\t" );
   printf( " processor time: %f seconds\n", cpuStop-cpuStart );
   for( tab = 0; tab < depth; tab++ ) printf( "\t" );
   printf( "wall-clock time: %f seconds\n", wallStop-wallStart );
   printf( "\n" );
}
|
DRB092-threadprivatemissing2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
No threadprivate is used to avoid data races.
This is the case for a variable referenced within a construct.
Data race pairs sum0@68:7 vs. sum0@68:12
sum0@68:7 vs. sum0@68:7
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)
/* Sums 1..1000 twice: once in an OpenMP parallel region through the shared
 * file-scope accumulator sum0, and once serially into sum1 as a reference.
 *
 * NOTE: the data race on sum0 is INTENTIONAL — this is a DataRaceBench
 * "yes" (race present) test case (see the header comment: sum0 lacks the
 * threadprivate directive, which is deliberately commented out above).
 * Do not "fix" it with threadprivate/reduction; that would defeat the
 * benchmark's purpose. */
int main()
{
  int i, sum=0;
  #pragma omp parallel
  {
    /* all threads accumulate into the single shared global sum0 with no
     * synchronization -> racy read-modify-write */
    #pragma omp for schedule(dynamic)
    for (i=1;i<=1000;i++)
    {
      sum0=sum0+i;
    }
    /* each thread then folds the current value of sum0 into sum; the
     * critical section protects sum but not the earlier race on sum0 */
    #pragma omp critical
    {
      sum= sum+sum0;
    }
  }
  /* reference calculation */
  for (i=1;i<=1000;i++)
  {
    sum1=sum1+i;
  }
  printf("sum=%d; sum1=%d\n",sum,sum1);
  // assert(sum==sum1);  /* disabled: would (correctly) fire when the race manifests */
  return 0;
}
|
simple.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <spllt_iface.h>
/* Small driver for the SpLLT sparse Cholesky solver: builds a 3x3
 * tridiagonal SPD system, factors it inside an OpenMP single region,
 * solves A*x = rhs, and checks the error via spllt_chkerr.
 *
 * FIX: all heap allocations were previously leaked; they are now freed
 * before returning. */
int main(int argc, char ** argv){
  void *akeep = NULL;
  void *fkeep = NULL;
  int *ptr = NULL;
  int *row = NULL;
  double *val = NULL;
  int *order = NULL;
  double *x = NULL;
  double *rhs = NULL;
  double *y = NULL;
  double *workspace = NULL;
  int n, nnz, nrhs, nb;
  long worksize;
  spllt_inform_t info;
  spllt_options_t options = SPLLT_OPTIONS_NULL();
  int stat;
  // Create the matrix (lower triangle in CSC form; note the 1-based
  // ptr/row indices — presumably a Fortran-style interface, confirm
  // against spllt_iface.h)
  // [ 2 -1  0 ]
  // [ -1 2 -1 ]
  // [ 0 -1  2 ]
  n = 3;
  nnz = 5;
  ptr = malloc((n+1) * sizeof(int));
  row = malloc(nnz * sizeof(int));
  val = malloc(nnz * sizeof(double));
  options.nb = nb = 4;   /* block size used by the factorization/solve */
  ptr[0] = 1; ptr[1] = 3; ptr[2] = 5; ptr[3] = 6;
  row[0] = 1; row[1] = 2; row[2] = 2; row[3] = 3; row[4] = 3;
  for(int i = 0; i < nnz; i++) val[i] = 2.0;
  val[1] = - 1.0; val[3] = - 1.0;  /* off-diagonal entries */
  order = malloc(n * sizeof(int));
  //Create RHS
  nrhs = 1;
  x = malloc(n * nrhs * sizeof(double));
  rhs = malloc(n * nrhs * sizeof(double));
  for(int i = 0; i < n; i++) rhs[i] = 1.0;
  /* x starts as a copy of rhs; spllt_solve overwrites it with the solution */
  memcpy(x, rhs, n * sizeof(double));
  #pragma omp parallel
  #pragma omp single
  {
    spllt_analyse(&akeep, &fkeep, &options, n, ptr,
        row, &info, order);
    spllt_factor(akeep, fkeep, &options, nnz, val, &info);
    spllt_wait();
    /* query the workspace size needed by the solve phase */
    spllt_prepare_solve(akeep, fkeep, nb, nrhs, &worksize, &info);
    printf("Need a workspace of size %ld\n", worksize);
    y = calloc( n * nrhs, sizeof(double));
    workspace = calloc( worksize, sizeof(double));
    spllt_set_mem_solve(akeep, fkeep, nb, nrhs, worksize, y, workspace, &info);
    spllt_solve(fkeep, &options, order, nrhs, x, &info, 6);
    spllt_wait();
    //spllt_solve(fkeep, &options, order, nrhs, x, &info, 2);
    spllt_chkerr(n, ptr, row, val, nrhs, x, rhs);
  }
  spllt_deallocate_akeep(&akeep, &stat);
  spllt_deallocate_fkeep(&fkeep, &stat);
  /* release all heap allocations (previously leaked) */
  free(ptr);
  free(row);
  free(val);
  free(order);
  free(x);
  free(rhs);
  free(y);
  free(workspace);
  return 0;
}
|
xomp_accelerator_sched_test_v2.c | // Liao 8/30/2013
// A dedicated self-contained file to test a scheduler using round-robin method across multiple threads
// v1: using exclusive upper bounds
// V2: using inclusive upper bounds
//
// Compile : gcc -fopenmp thisfile.c
#include <stdio.h>
#include <omp.h>
#include <assert.h>
/*
_p_num_threads: number of threads of the thread team participating the scheduling
_p_thread_id: the current thread's id within the current team
Return the adjusted numbers including:
loop_chunk_size: the real chunk size considering original chunksize and step
loop_sched_index: the lower bound for current thread
loop_stride: the total stride for one round of scheduling of all threads
*/
/*
  Initialize the per-thread bookkeeping for a round-robin static schedule.
  _p_num_threads: number of threads in the team; _p_thread_id: this thread's id.
  Outputs:
    loop_chunk_size: chunk size scaled by the loop step
    loop_sched_index: this thread's starting lower bound
    loop_stride: distance covered by one full round of all threads
*/
void XOMP_static_sched_init(int lb, int up, int step, int orig_chunk_size, int _p_num_threads, int _p_thread_id,
    int * loop_chunk_size, int * loop_sched_index, int * loop_stride)
{
  /* a chunk advances orig_chunk_size iterations, each of width `step` */
  int scaledChunk = orig_chunk_size * step;
  *loop_chunk_size = scaledChunk;
  *loop_stride = scaledChunk * _p_num_threads;
  /* with a single thread the schedule simply starts at lb; otherwise each
     thread is offset by one chunk per preceding thread id */
  if (_p_num_threads == 1)
    *loop_sched_index = lb;
  else
    *loop_sched_index = lb + scaledChunk * _p_thread_id;
}
/*
Using current thread ID (_p_thread_id) and team size (_p_num_threads), calculate lb and ub for the current thread
for the round robin scheduling with lower (loop_sched_index), upper (loop_end) , stride (loop_stride), and chunk size (loop_chunk_size)
*/
/*
  Compute the next [lb, ub] (both INCLUSIVE) chunk for the calling thread in a
  round-robin static schedule.  Returns 1 if a chunk was produced, 0 when the
  iteration space is exhausted.  loop_end is the original INCLUSIVE loop bound;
  internally the logic uses exclusive bounds, so it is adjusted on entry and
  the produced upper bound is adjusted back before returning.

  FIXES vs. the original:
  - `*ub --;` / `*ub ++;` parse as `*(ub--)` / `*(ub++)` (postfix binds
    tighter than unary `*`), so the pointer was stepped and the pointed-to
    value never adjusted — the single-thread path returned an exclusive upper
    bound, causing one extra iteration (and an out-of-bounds access in the
    caller).  Now written as `(*ub)--;` / `(*ub)++;`.
  - In the positive-step clamp, `e` has already been converted to an
    inclusive bound, so overrunning chunks must be clamped to loop_end-1
    (last valid index), not to the exclusive loop_end.
*/
int XOMP_static_sched_next(
    int* loop_sched_index , int loop_end, int orig_step, int loop_stride, int loop_chunk_size,
    int _p_num_threads, int _p_thread_id,
    int *lb,int *ub)
{
  int b,e;
  b = *loop_sched_index;
  /* 1. adjust the original loop end from inclusive to exclusive */
  if (orig_step >0)
    loop_end ++;
  else
    loop_end --;
  if (_p_num_threads == 1) { /* not in parallel: hand back the whole range once */
    e = loop_end;
    if(b == e) return 0;
    *lb = b;
    *ub = e;
    *loop_sched_index = e;  /* mark the range as consumed */
    /* convert the exclusive bound back to inclusive for the caller */
    if (orig_step >0)
      (*ub)--;
    else
      (*ub)++;
    return 1;
  }
  /* advance this thread's cursor by one full round of all threads */
  *loop_sched_index += loop_stride;
  e = b + loop_chunk_size;
  /* convert e to an inclusive bound immediately, before it is used below */
  if (orig_step >0)
    e --;
  else
    e ++;
  if(loop_chunk_size > 0){
    if(b >= loop_end) return 0;
    if(e >= loop_end){
      /* clamp the final partial chunk: loop_end is exclusive, so the last
         valid inclusive index is loop_end - 1 */
      e = loop_end - 1;
    }
  } else {
    if(b <= loop_end) return 0;
    /* NOTE: clamping of the final partial chunk for negative steps is not
       implemented (the original carried dead `#if 0` code referencing a
       nonexistent `tp`); callers currently only use positive steps. */
  }
  *lb = b;
  *ub = e;
  return 1;
}
/* Outlined loop body used to exercise the XOMP round-robin scheduler:
 * each participating thread repeatedly requests [lower, upper] chunks of
 * the iteration space 0..n-1 and accumulates (n - ij) into _dev_u[ij].
 * Called concurrently from omp sections; correctness relies on each
 * iteration being assigned to exactly one thread. */
void OUT__2__10550__(int n,int *_dev_u)
{
  int ij;
  int _dev_lower, _dev_upper;
  // variables for adjusted loop info considering both original chunk size and step(strip)
  int _dev_loop_chunk_size;
  int _dev_loop_sched_index;
  int _dev_loop_stride;
  // 1-D thread block: team size and this thread's id drive the schedule
  int _dev_thread_num = omp_get_num_threads();
  int _dev_thread_id = omp_get_thread_num();
  printf ("thread count = %d, current thread id = %d\n", _dev_thread_num, _dev_thread_id);
  int orig_start =0; // must be correct!!
  int orig_end = n-1; // inclusive upper bound (adjusted internally by the scheduler)
  int orig_step = 1;
  int orig_chunk_size = 1;
  XOMP_static_sched_init (orig_start, orig_end, orig_step, orig_chunk_size, _dev_thread_num, _dev_thread_id, \
      & _dev_loop_chunk_size , & _dev_loop_sched_index, & _dev_loop_stride);
  printf ("Initialized chunk size = %d, sched indx =%d, stride = %d\n",_dev_loop_chunk_size, _dev_loop_sched_index, _dev_loop_stride);
  /* keep pulling chunks until the scheduler reports the space is exhausted */
  while (XOMP_static_sched_next (&_dev_loop_sched_index, orig_end, orig_step, _dev_loop_stride, _dev_loop_chunk_size, _dev_thread_num, _dev_thread_id, & _dev_lower
        , & _dev_upper))
  {
    printf ("Thread ID: %d Allocated lower = %d upper = %d\n", _dev_thread_id, _dev_lower, _dev_upper);
    for (ij = _dev_lower ; ij <= _dev_upper; ij ++) { // using inclusive bound here
      _dev_u[ij] += (n - ij);
    }
  }
}
}
#define SIZE 10
int a[SIZE], b[SIZE], c[SIZE] ;
/* Test driver: computes the same per-element values three ways —
 * serially into a[] as the reference, with a 4-thread team into b[],
 * and with a 1-thread team into c[] — then asserts all three agree.
 * Using += (not =) catches any iteration executed more than once. */
int main ()
{
  int i;
  // reference array and values for each element
  for (i=0; i<=SIZE-1; i++)
  {
    a[i] = 0;
    b[i] = 0;
    c[i] = 0;
    a[i] += (SIZE - i); // reverse order to make sure no default values are messing up things
    // further using += to catch duplicated execution of one iteration!
  }
  // calculate array elements using the scheduling functions, 4-thread team:
  // each section runs the outlined loop as one "thread" of the schedule
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    OUT__2__10550__ (SIZE, b);
    #pragma omp section
    OUT__2__10550__ (SIZE, b);
    #pragma omp section
    OUT__2__10550__ (SIZE, b);
    #pragma omp section
    OUT__2__10550__ (SIZE, b);
  }
  printf ("-------------------1 thread case ------------\n");
  // calculate array elements using the scheduling functions, single thread
  #pragma omp parallel sections num_threads(1)
  {
    #pragma omp section
    OUT__2__10550__ (SIZE, c);
  }
  for (i=0; i<SIZE; i++)
  {
    printf ("a[%d]=%d, b[%d]=%d, c[%d]= %d \n", i, a[i], i, b[i], i, c[i]);
  }
  /* all three computations must agree element-wise */
  for (i=0; i<SIZE; i++)
  {
    assert (a[i]==b[i] && a[i]==c[i]);
  }
  printf ("Success if you see this printf output!\n");
  return 0;
}
|
RBF_evaluate_Fast.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex RBF_evaluate_Fast.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif
/*************************************************************************/
double RBF_function(double d, double c, char *RBF_function_name)
{
double val = -1;
if (strcmp(RBF_function_name, "gaussian")==0)
{
val = exp(-0.5*d*d/(c*c));
return val;
}
if (strcmp(RBF_function_name, "thinplate")==0)
{
val = d*d*log(d+1);
return val;
}
if (strcmp(RBF_function_name, "cubic")==0)
{
val = (d*d*d);
return val;
}
if (strcmp(RBF_function_name, "multiquadric")==0)
{
val = sqrt(1+d*d/(c*c));
return val;
}
return val;
}
/*************************************************************************/
/* MEX entry point: evaluates an RBF interpolant (kernel part + constant +
 * linear polynomial tail) at nPoints query points, parallelized over the
 * query points with OpenMP.
 * Inputs (prhs): 0 = kernel name string, 1 = dimX-by-nI interpolation
 * points, 2 = dimX-by-nPoints query points, 3 = scalar shape parameter,
 * 4 = coefficient vector laid out as [kernel weights (nI) | constant |
 * linear terms (dimX)].
 * Output (plhs[0]): nPoints-by-1 vector of interpolant values. */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    /* NOTE(review): prhs[0] is read before the argument-count check below;
     * presumably callers always pass 5 arguments — confirm. */
    char *RBF_function_name = mxArrayToString(prhs[0]);
    /* Check for proper number of arguments */
    if(nrhs!=5) {
        mexErrMsgTxt("5 inputs are required.");
    } else if(nlhs>1) {
        mexErrMsgTxt("Too many output arguments.");
    }
    double* interp_points = mxGetPr(prhs[1]);
    int nI = mxGetN(prhs[1]);
    double* x = mxGetPr(prhs[2]);
    int dimX = mxGetM(prhs[2]);
    int nPoints = mxGetN(prhs[2]);
    double* tmpPtr = mxGetPr(prhs[3]);
    double constant = tmpPtr[0];
    double* coeff = mxGetPr(prhs[4]);
    plhs[0] = mxCreateDoubleMatrix(nPoints,1, mxREAL);
    double* I_f = mxGetPr(plhs[0]);
    int i;
    /* each query point is independent, so the outer loop parallelizes cleanly */
    #pragma omp parallel for shared(I_f,x) private(i) firstprivate(coeff,interp_points,nI,dimX,constant,RBF_function_name)
    for (i = 0; i < nPoints; i++)
    {
        int l, k;
        I_f[i] = 0.0;
        /* kernel part: sum_k coeff[k] * phi(||x_i - p_k||) */
        for (k = 0; k < nI; k++)
        {
            /*d = distance(x[:,i], interp_points(:,k));*/
            double tmp = 0;
            for (l = 0; l < dimX; l++)
            {
                double tmp2 = (x[l+dimX*i] - interp_points[l+dimX*k]);
                tmp += (tmp2*tmp2);
            }
            double d = sqrt(tmp);
            I_f[i] += coeff[k] * RBF_function(d, constant, RBF_function_name);
        }
        /* polynomial tail: constant term ... */
        I_f[i] += coeff[nI];
        /* ... plus linear terms in each coordinate */
        for (k = 0; k < dimX; k++)
        {
            I_f[i] += coeff[k+nI+1]*x[k+dimX*i];;
        }
    }
    mxFree(RBF_function_name);
}
/*************************************************************************/
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for struct timeval values, normalizing so that
 * result->tv_usec is non-negative.  Returns 1 if the difference is negative,
 * 0 otherwise.  Note: *y is modified during the carry/borrow normalization. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec */
  if (x->tv_usec < y->tv_usec)
  {
    int carrySec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carrySec;
    y->tv_sec  += carrySec;
  }
  /* conversely, fold excess microseconds back into whole seconds */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int excessSec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * excessSec;
    y->tv_sec  -= excessSec;
  }
  /* after normalization the microsecond difference is non-negative */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for a time-tiled (PLUTO/CLooG-generated) order-1 3D
 * 7-point stencil with variable coefficients.  Usage: prog Nx Ny Nz Nt.
 * Runs TESTS repetitions, timing each and reporting the minimum. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer than
   * 3 (resp. 4) command-line arguments are supplied — presumably the
   * benchmark harness always passes all four; confirm. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2 adds one halo layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A holds two time steps (ping-pong buffers)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* seven per-point coefficient fields, one per stencil leg */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 8;
  tile_size[3] = 512;
  tile_size[4] = -1;   /* sentinel terminating the list */
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with a fixed seed for reproducible inputs
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>.  */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Machine-generated time-skewed/tiled loop nest; t1 walks time tiles,
     * t2..t4 walk space tiles (parallelized over t2), t5..t8 are the
     * intra-tile time and space coordinates. Do not hand-edit. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-31,32)),ceild(32*t2-Nz-508,512)),ceild(8*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(16*t1+Nx+29,512)),floord(32*t2+Nx+28,512)),floord(8*t3+Nx+4,512)),floord(32*t1-32*t2+Nz+Nx+27,512));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),512*t4+510),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
genome.c | /* =============================================================================
*
* genome.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "gene.h"
#include "random.h"
#include "segments.h"
#include "sequencer.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "vector.h"
enum param_types {
PARAM_GENE = (unsigned char)'g',
PARAM_NUMBER = (unsigned char)'n',
PARAM_SEGMENT = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
#define PARAM_DEFAULT_GENE (1L << 14)
#define PARAM_DEFAULT_NUMBER (1L << 22)
#define PARAM_DEFAULT_SEGMENT (1L << 6)
#define PARAM_DEFAULT_THREAD (1L)
long global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
    /* Print the usage summary with the compiled-in defaults, then abort.
     * Output is byte-identical to the previous puts/printf mix. */
    printf("Usage: %s [options]\n", appName);
    printf("\nOptions: (defaults)\n\n");
    printf(" g <UINT> Length of [g]ene (%li)\n", PARAM_DEFAULT_GENE);
    printf(" n <UINT> Min [n]umber of segments (%li)\n", PARAM_DEFAULT_NUMBER);
    printf(" s <UINT> Length of [s]egment (%li)\n", PARAM_DEFAULT_SEGMENT);
    printf(" t <UINT> Number of [t]hreads (%li)\n", PARAM_DEFAULT_THREAD);
    printf("\n");
    printf("The actual number of segments created may be greater than -n\n");
    printf("in order to completely cover the gene.\n");
    exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams( void )
{
    /* Reset every recognized option to its compiled-in default. */
    static const struct {
        unsigned char key;
        long value;
    } defaults[] = {
        { PARAM_GENE,    PARAM_DEFAULT_GENE    },
        { PARAM_NUMBER,  PARAM_DEFAULT_NUMBER  },
        { PARAM_SEGMENT, PARAM_DEFAULT_SEGMENT },
        { PARAM_THREAD,  PARAM_DEFAULT_THREAD  },
    };
    long i;
    for (i = 0; i < (long)(sizeof(defaults) / sizeof(defaults[0])); i++) {
        global_params[defaults[i].key] = defaults[i].value;
    }
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
    long i;
    long opt;

    opterr = 0; /* we report errors ourselves via displayUsage */
    setDefaultParams();

    /* Each recognized option takes a numeric argument and is stored in
     * global_params, indexed by its own option character. */
    while ((opt = getopt(argc, argv, "g:n:s:t:")) != -1) {
        if (opt == 'g' || opt == 'n' || opt == 's' || opt == 't') {
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            opterr++; /* unknown option or missing argument */
        }
    }

    /* Stray positional arguments are errors too. */
    for (i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* main
* =============================================================================
*/
/* Benchmark driver: build a random gene and overlapping segments, run the
 * multithreaded sequencer to reassemble the gene, verify, and clean up.
 * MAIN/MAIN_RETURN and the SIM_/GOTO_/TM_ macros come from the benchmark
 * harness headers (tm.h, thread.h) -- presumably the STAMP/simulator glue;
 * confirm against those headers. */
MAIN (argc,argv)
{
    TIMER_T start;
    TIMER_T stop;

    /* Initialization */
    parseArgs(argc, (char** const)argv);
    SIM_GET_NUM_CPU(global_params[PARAM_THREAD]);
    printf("Creating gene and segments... ");
    fflush(stdout);

    long geneLength = global_params[PARAM_GENE];
    long segmentLength = global_params[PARAM_SEGMENT];
    long minNumSegment = global_params[PARAM_NUMBER];
    long numThread = global_params[PARAM_THREAD];

    TM_STARTUP(numThread);
    P_MEMORY_STARTUP(numThread);
    thread_startup(numThread);

    /* Fixed seed 0 makes the generated input deterministic across runs. */
    random_t* randomPtr = random_alloc();
    assert(randomPtr != NULL);
    random_seed(randomPtr, 0);

    gene_t* genePtr = gene_alloc(geneLength);
    assert( genePtr != NULL);
    gene_create(genePtr, randomPtr);
    char* gene = genePtr->contents;

    /* Segments drawn from the gene; may exceed minNumSegment so that the
     * whole gene is covered (see displayUsage). */
    segments_t* segmentsPtr = segments_alloc(segmentLength, minNumSegment);
    assert(segmentsPtr != NULL);
    segments_create(segmentsPtr, genePtr, randomPtr);

    sequencer_t* sequencerPtr = sequencer_alloc(geneLength, segmentLength, segmentsPtr);
    assert(sequencerPtr != NULL);

    puts("done.");
    printf("Gene length = %li\n", genePtr->length);
    printf("Segment length = %li\n", segmentsPtr->length);
    printf("Number segments = %li\n", vector_getSize(segmentsPtr->contentsPtr));
    fflush(stdout);

    /* Benchmark */
    printf("Sequencing gene... ");
    fflush(stdout);
    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    // wallclock time, we want to be sure we read time inside the
    // simulator, or else we report native cycles spent on the benchmark
    // instead of simulator cycles.
    GOTO_SIM();
    TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
    {
        sequencer_run(sequencerPtr);
    }
#else
    thread_start(sequencer_run, (void*)sequencerPtr);
#endif
    TIMER_READ(stop);
    // NB: As above, timer reads must be done inside of the simulated region
    // for PTLSim/ASF
    GOTO_REAL();
    puts("done.");
    printf("Time = %lf\n", TIMER_DIFF_SECONDS(start, stop));
    fflush(stdout);

    /* Check result: report whether the reconstruction matches the gene. */
    {
        char* sequence = sequencerPtr->sequence;
        int result = strcmp(gene, sequence);
        printf("Sequence matches gene: %s\n", (result ? "no" : "yes"));
        if (result) {
            printf("gene = %s\n", gene);
            printf("sequence = %s\n", sequence);
        }
        fflush(stdout);
        /* Weak structural check only; exact equality was reported above. */
        assert(strlen(sequence) >= strlen(gene));
    }

    /* Clean up */
    printf("Deallocating memory... ");
    fflush(stdout);
    sequencer_free(sequencerPtr);
    segments_free(segmentsPtr);
    gene_free(genePtr);
    random_free(randomPtr);
    puts("done.");
    fflush(stdout);

    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();
    thread_shutdown();

    MAIN_RETURN(0);
}
/* =============================================================================
*
* End of genome.c
*
* =============================================================================
*/
|
task-dependency.c | /*
* task-dependency.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include "ompt/ompt-signal.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
/* Archer/TSan testcase: the race between the two var++ increments below is
 * DELIBERATE -- the FileCheck lines at the bottom of the file expect the
 * data-race report. Do not "fix" the missing dependency. */
int main(int argc, char *argv[]) {
  int var = 0, a = 0, b = 0;
#pragma omp parallel num_threads(8) shared(var, a)
#pragma omp master
  {
    // Task 1: first increment, ordered via depend(out: var).
#pragma omp task shared(var, a, b) depend(out : var)
    {
      OMPT_SIGNAL(a);
      var++;
      OMPT_SIGNAL(b);
    }
    // Task 2: ordered after task 1; blocks until all three tasks signalled.
#pragma omp task shared(a) depend(in : var)
    {
      OMPT_SIGNAL(a);
      OMPT_WAIT(a, 3);
    }
    // Task 3: second increment, intentionally unordered w.r.t. task 1.
#pragma omp task shared(var, b) // depend(in: var) is missing here!
    {
      OMPT_WAIT(b, 1);
      var++;
      OMPT_SIGNAL(a);
    }
    // Give other thread time to steal the task.
    OMPT_WAIT(a, 2);
  }
  // Expected final value is 2; a lost update caused by the race would make
  // the run report an error.
  int error = (var != 2);
  fprintf(stderr, "DONE\n");
  return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:43
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
GB_unop__identity_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_fp32
// op(A') function: GB_unop_tran__identity_uint8_fp32
// C type: uint8_t
// A type: float
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_fp32
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entrywise identity with typecast: Cx [p] = (uint8_t) Ax [p].
    // Each entry is read and written independently, so aliasing is safe.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = GB_cast_to_uint8_t ((double) (Ax [p])) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the loop body lives in the shared template
// GB_unop_transpose.c, specialized through the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_uint8_fp32
(
    GrB_Matrix C,                          // output matrix
    const GrB_Matrix A,                    // input matrix, transposed on the fly
    int64_t *GB_RESTRICT *Rowcounts,       // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                            // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
atomic.c | //////////////////////////////////////////////////////////////
//
// atomic.c
//
// Copyright (c) 2017, Hassan Salehe Matar
// All rights reserved.
//
// This file is part of Clanomp. For details, see
// https://github.com/hassansalehe/Clanomp. Please also
// see the LICENSE file for additional BSD notice
//
// Redistribution and use in source and binary forms, with
// or without modification, are permitted provided that
// the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the
// above copyright notice, this list of conditions and
// the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names
// of its contributors may be used to endorse or promote
// products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////
// From the OpenMP specification:
// * "atomic" ensures specific memory location is accessed
// atomically
//
// * #pragma omp atomic (read|write|update|capture)
// expression <statement|block>
//
// References:
// 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf
// 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
#include <stdio.h>
#include <omp.h>
int main() {
  // Every thread increments the shared counter once; "atomic" makes the
  // read-modify-write indivisible so no update is lost.
  int count = 0;
#pragma omp parallel shared(count)
  {
#pragma omp atomic
    count += 1;
  }
  printf("Value of count: %d, construct: <atomic>\n", count);
  return 0;
}
|
GB_unaryop__identity_uint32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint32_fp64
// op(A') function: GB_tran__identity_uint32_fp64
// C type: uint32_t
// A type: double
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint32_fp64
(
uint32_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the loop body lives in the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above.
GrB_Info GB_tran__identity_uint32_fp64
(
    GrB_Matrix C,                      // output matrix
    const GrB_Matrix A,                // input matrix, transposed on the fly
    int64_t **Rowcounts,               // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                        // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | #include <omp.h>
#include <stdio.h>
#include <string.h>
#define MAX(a, b) (a > b) ? a : b
#define MAX_N 10000
#define MAX_M 1000000
int W[MAX_N], V[MAX_N];
int DP[2][MAX_M+1]; // [0][] = skip, [1][] = selected
/* 0/1 knapsack, two rolling DP rows parallelized with OpenMP.
 * Reads N (items) and M (capacity), then N pairs (weight, value);
 * prints the maximum achievable value. */
int main(void) {
  int N, M;
  if (scanf("%d %d", &N, &M) != 2) {
    return 1; /* malformed input */
  }
  for (int i = 0; i < N; i++) {
    scanf("%d %d", &W[i], &V[i]);
  }
  /* Zero both rows in full. The previous memset covered only
   * sizeof(int)*(M+1)*2 bytes measured from DP[0][0], which never reaches
   * row DP[1] (rows are MAX_M+1 wide); static zero-init masked that. */
  memset(DP, 0, sizeof(DP));
  int i_in = 0;
  int i_out = 0; /* BUGFIX: was uninitialized -- DP[i_out][M] below was UB when N == 0 */
  for (int i = 0; i < N; i++) {
    i_out = 1 - i_in;
    int W_curr = W[i], V_curr = V[i];
    #pragma omp parallel
    {
      /* Capacities that can fit item i: best of taking or skipping it.
       * (Loop variable renamed from 'i' to avoid shadowing the item index.) */
      #pragma omp for
      for (int c = W_curr; c <= M; c++) {
        DP[i_out][c] = MAX(DP[i_in][c-W_curr]+V_curr, DP[i_in][c]);
      }
      /* Capacities too small for item i: carry the previous row over. */
      #pragma omp for
      for (int c = 0; c < W_curr; c++) {
        DP[i_out][c] = DP[i_in][c];
      }
    }
    i_in = i_out;
  }
  printf("%d\n", DP[i_out][M]);
  return 0;
}
|
LocalFilterScore.h | /*
* LocalFilterScore.h
*
* Created on: 20.11.2014
* Author: Michael Hamann, Gerd Lindner
*/
#ifndef LOCALLOGSCORE_H
#define LOCALLOGSCORE_H
#include "../edgescores/EdgeScore.h"
#include "../auxiliary/Parallel.h"
#include <atomic>
#include <memory>
namespace NetworKit {
/**
* Local filtering edge scoring. Edges with high score are more important.
*
* Edges are ranked locally, the top d^e (logarithmic, default) or 1+e*(d-1) edges (non-logarithmic) are kept.
* For equal attribute values, neighbors of low degree are preferred.
*/
template<typename InType>
class LocalFilterScore : public EdgeScore<double> {
public:
    /**
     * Initialize the local edge filtering score.
     *
     * @param G The graph for which the score shall be.
     * @param attribute The input attribute according to which the edges shall be filtered locally.
     * @param logarithmic If the score shall be logarithmic in the rank (then d^e edges are kept). Linear otherwise.
     */
    LocalFilterScore(const Graph& G, const std::vector< InType > &attribute, bool logarithmic = true) :
        EdgeScore<double>(G), attribute(attribute), logarithmic(logarithmic) {}

    /**
     * Execute the algorithm: compute, for every edge, the minimum
     * sparsification exponent e for which the edge survives, and store it
     * in scoreData (higher score = more important edge).
     */
    virtual void run() {
        if (!G.hasEdgeIds()) {
            throw std::runtime_error("edges have not been indexed - call indexEdges first");
        }

        /*
         * For each edge, we calculate the minimum required sparsification exponent e
         * such that the edge is contained in the sparse graph.
         */
        std::unique_ptr<std::atomic<double>[]> sparsificationExp(new std::atomic<double>[G.upperEdgeIdBound()]{});

        G.balancedParallelForNodes([&](node i) {
            count d = G.degree(i);

            /*
             * The top d^e edges (sorted by similarity in descending order)
             * are to be kept in the sparse graph.
             */
            std::vector<edgeid> neighbors;
            neighbors.reserve(d);
            G.forNeighborsOf(i, [&](node _i, node j, edgeid eid) {
                neighbors.emplace_back(eid);
            });
            std::sort(neighbors.begin(), neighbors.end(), [&](const edgeid& e1, const edgeid& e2) {
                return attribute[e1] > attribute[e2];
            });

            // Dense ranking: edges with equal attribute values share a rank.
            count rank = 0;
            count numSame = 1;
            InType oldValue = std::numeric_limits<InType>::lowest();
            for (edgeid eid : neighbors) {
                if (attribute[eid] != oldValue) {
                    // BUGFIX: remember the current group's value. Without this
                    // assignment the comparison above was always true, numSame
                    // never grew, and ties were never ranked together.
                    oldValue = attribute[eid];
                    rank += numSame;
                    numSame = 1;
                } else {
                    ++numSame;
                }

                double e = 1.0;
                if (d > 1) {
                    if (logarithmic) {
                        e = 1.0 - log(rank) / log(d); // top d^e edges are kept
                    } else {
                        e = 1.0 - (rank-1) * 1.0 / (d - 1); // Keep top 1 + e * (d-1) edges
                    }
                }

                // An edge survives if either endpoint wants to keep it.
                Aux::Parallel::atomic_max(sparsificationExp[eid], e);
            }
        });

        scoreData.clear();
        scoreData.resize(G.upperEdgeIdBound());
        #pragma omp parallel for
        for (omp_index i = 0; i < static_cast<omp_index>(scoreData.size()); ++i) {
            scoreData[i] = sparsificationExp[i];
        }

        hasRun = true;
    }

    virtual double score(node u, node v) {
        throw std::runtime_error("Not implemented: Use scores() instead.");
    }

    virtual double score(edgeid eid) {
        throw std::runtime_error("Not implemented: Use scores() instead.");
    }

private:
    const std::vector<InType>& attribute; // not owned; must outlive this object
    bool logarithmic;
};
} // namespace NetworKit
#endif // LOCALLOGSCORE_H
|
functions.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file functions.h
* \brief Generic quantum computing functions
*/
#ifndef FUNCTIONS_H_
#define FUNCTIONS_H_
namespace qpp {
// Eigen function wrappers

/**
 * \brief Transpose
 *
 * \param A Eigen expression
 * \return Transpose of \a A, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
transpose(const Eigen::MatrixBase<Derived>& A) {
    // binding to dyn_mat evaluates the expression exactly once
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::transpose()");
    // END EXCEPTION CHECKS

    return mat.transpose();
}

/**
 * \brief Complex conjugate
 *
 * \param A Eigen expression
 * \return Complex conjugate of \a A, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
conjugate(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::conjugate()");
    // END EXCEPTION CHECKS

    return mat.conjugate();
}

/**
 * \brief Adjoint
 *
 * \param A Eigen expression
 * \return Adjoint (Hermitian conjugate) of \a A, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> adjoint(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::adjoint()");
    // END EXCEPTION CHECKS

    return mat.adjoint();
}

/**
 * \brief Inverse
 *
 * \param A Eigen expression
 * \return Inverse of \a A, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> inverse(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::inverse()");
    // END EXCEPTION CHECKS

    return mat.inverse();
}
/**
 * \brief Trace
 *
 * \param A Eigen expression
 * \return Trace of \a A, as a scalar over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar trace(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::trace()");
    // END EXCEPTION CHECKS

    return mat.trace();
}

/**
 * \brief Determinant
 *
 * \param A Eigen expression
 * \return Determinant of \a A, as a scalar over the same scalar field as \a A.
 * Returns \f$\pm \infty\f$ when the determinant overflows/underflows.
 */
template <typename Derived>
typename Derived::Scalar det(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::det()");
    // END EXCEPTION CHECKS

    return mat.determinant();
}

/**
 * \brief Logarithm of the determinant
 *
 * Useful when the determinant overflows/underflows
 *
 * \param A Eigen expression
 * \return Logarithm of the determinant of \a A, as a scalar
 * over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar logdet(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::logdet()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::logdet()");
    // END EXCEPTION CHECKS

    // sum the logs of the diagonal of U from an LU factorization instead of
    // taking log(det(A)) directly, which could overflow/underflow
    Eigen::PartialPivLU<dyn_mat<typename Derived::Scalar>> lu(mat);
    dyn_mat<typename Derived::Scalar> U =
        lu.matrixLU().template triangularView<Eigen::Upper>();
    typename Derived::Scalar result = std::log(U(0, 0));
    for (idx j = 1; j < static_cast<idx>(mat.rows()); ++j)
        result += std::log(U(j, j));

    return result;
}
/**
 * \brief Element-wise sum of \a A
 *
 * \param A Eigen expression
 * \return Element-wise sum of \a A, as a scalar
 * over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar sum(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::sum()");
    // END EXCEPTION CHECKS

    return mat.sum();
}

/**
 * \brief Element-wise product of \a A
 *
 * \param A Eigen expression
 * \return Element-wise product of \a A, as a scalar
 * over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar prod(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::prod()");
    // END EXCEPTION CHECKS

    return mat.prod();
}

/**
 * \brief Frobenius norm
 *
 * \param A Eigen expression
 * \return Frobenius norm of \a A
 */
template <typename Derived>
double norm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::norm()");
    // END EXCEPTION CHECKS

    // convert matrix to complex then return its norm
    return (mat.template cast<cplx>()).norm();
}
/**
 * \brief Normalizes state vector (column or row vector) or density matrix
 *
 * \param A Eigen expression
 * \return Normalized state vector (unit norm) or density matrix (unit trace)
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
normalize(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::normalize()");
    // END EXCEPTION CHECKS

    dyn_mat<typename Derived::Scalar> result;

    if (internal::check_cvector(rA) || internal::check_rvector(rA)) {
        // vector: divide by its Frobenius norm
        double normA = norm(rA);
        try {
            if (normA == 0)
                throw std::overflow_error("Division by zero!");
        } catch (...) {
            std::cerr << "In qpp::normalize()\n";
            throw;
        }
        result = rA / normA;
    } else if (internal::check_square_mat(rA)) {
        // square matrix: divide by its trace
        typename Derived::Scalar traceA = trace(rA);
        try {
            if (std::abs(traceA) == 0)
                throw std::overflow_error("Division by zero!");
        } catch (...) {
            std::cerr << "In qpp::normalize()\n";
            throw;
        }
        // FIX: reuse the trace computed above; the original called
        // trace(rA) a second time here.
        result = rA / traceA;
    } else
        throw exception::MatrixNotSquareNorVector("qpp::normalize()");

    return result;
}
/**
 * \brief Full eigen decomposition
 * \see qpp::heig()
 *
 * \param A Eigen expression
 * \return Pair of: 1. Eigenvalues of \a A, as a complex dynamic column vector,
 * and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
std::pair<dyn_col_vect<cplx>, cmat> eig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::eig()");
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::eig()");
    // END EXCEPTION CHECKS

    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());

    return std::make_pair(es.eigenvalues(), es.eigenvectors());
}

/**
 * \brief Eigenvalues
 * \see qpp::hevals()
 *
 * \param A Eigen expression
 * \return Eigenvalues of \a A, as a complex dynamic column vector
 */
template <typename Derived>
dyn_col_vect<cplx> evals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::evals()");
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::evals()");
    // END EXCEPTION CHECKS

    return eig(rA).first;
}

/**
 * \brief Eigenvectors
 * \see qpp::hevects()
 *
 * \param A Eigen expression
 * \return Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
cmat evects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::evects()");
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::evects()");
    // END EXCEPTION CHECKS

    // FIX: the original constructed this solver, discarded it, and then
    // called eig(rA).second, decomposing the matrix a second time.
    // Decompose once and return the eigenvectors directly.
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());

    return es.eigenvectors();
}
/**
 * \brief Full eigen decomposition of Hermitian expression
 * \see qpp::eig()
 *
 * \param A Eigen expression
 * \return Pair of: 1. Eigenvalues of \a A, as a real dynamic column vector,
 * and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
std::pair<dyn_col_vect<double>, cmat>
heig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::heig()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::heig()");
    // END EXCEPTION CHECKS

    // self-adjoint solver: real eigenvalues, no Hermiticity check performed
    Eigen::SelfAdjointEigenSolver<cmat> solver(mat.template cast<cplx>());

    return std::make_pair(solver.eigenvalues(), solver.eigenvectors());
}

/**
 * \brief Hermitian eigenvalues
 * \see qpp::evals()
 *
 * \param A Eigen expression
 * \return Eigenvalues of Hermitian \a A, as a real dynamic column vector
 */
template <typename Derived>
dyn_col_vect<double> hevals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::hevals()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::hevals()");
    // END EXCEPTION CHECKS

    return heig(mat).first;
}

/**
 * \brief Eigenvectors of Hermitian matrix
 * \see qpp::evects()
 *
 * \param A Eigen expression
 * \return Eigenvectors of Hermitian matrix \a A, as columns of a complex matrix
 */
template <typename Derived>
cmat hevects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::hevects()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::hevects()");
    // END EXCEPTION CHECKS

    return heig(mat).second;
}
/**
 * \brief Full singular value decomposition
 *
 * \param A Eigen expression
 * \return Tuple of: 1. Left singular vectors of \a A, as columns of a complex
 * dynamic matrix, 2. Singular values of \a A, ordered in decreasing order,
 * as a real dynamic column vector, and 3. Right singular vectors of \a A,
 * as columns of a complex dynamic matrix
 */
template <typename Derived>
std::tuple<cmat, dyn_col_vect<double>, cmat>
svd(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svd()");
    // END EXCEPTION CHECKS

    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> svd_dec(
        mat, Eigen::DecompositionOptions::ComputeFullU |
                 Eigen::DecompositionOptions::ComputeFullV);

    return std::make_tuple(svd_dec.matrixU(), svd_dec.singularValues(),
                           svd_dec.matrixV());
}

/**
 * \brief Singular values
 *
 * \param A Eigen expression
 * \return Singular values of \a A, ordered in decreasing order,
 * as a real dynamic column vector
 */
template <typename Derived>
dyn_col_vect<double> svals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svals()");
    // END EXCEPTION CHECKS

    // no U/V requested: only the singular values are computed
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> svd_dec(mat);

    return svd_dec.singularValues();
}

/**
 * \brief Left singular vectors
 *
 * \param A Eigen expression
 * \return Complex dynamic matrix, whose columns are the left singular
 * vectors of \a A
 */
template <typename Derived>
cmat svdU(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svdU()");
    // END EXCEPTION CHECKS

    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> svd_dec(
        mat, Eigen::DecompositionOptions::ComputeFullU);

    return svd_dec.matrixU();
}

/**
 * \brief Right singular vectors
 *
 * \param A Eigen expression
 * \return Complex dynamic matrix, whose columns are the right singular
 * vectors of \a A
 */
template <typename Derived>
cmat svdV(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svdV()");
    // END EXCEPTION CHECKS

    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> svd_dec(
        mat, Eigen::DecompositionOptions::ComputeFullV);

    return svd_dec.matrixV();
}
// Matrix functional calculus
/**
 * \brief Functional calculus f(A)
 *
 * \note Implemented via the (complex) eigendecomposition
 * \f$A = V D V^{-1}\f$, so the result is meaningful only for
 * diagonalizable matrices; for a defective \a A the eigenvector matrix
 * is singular and its inverse below is not valid
 *
 * \param A Eigen expression
 * \param f Pointer-to-function from complex to complex
 * \return \a \f$f(A)\f$
 */
template <typename Derived>
cmat funm(const Eigen::MatrixBase<Derived>& A, cplx (*f)(const cplx&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::funm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::funm()");
    // END EXCEPTION CHECKS
    // general (non-Hermitian) eigensolver; cast to complex first
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = (*f)(evals(i)); // apply f(x) to each eigenvalue
    cmat evalsdiag = evals.asDiagonal();
    // f(A) = V f(D) V^{-1}
    return evects * evalsdiag * evects.inverse();
}
/**
 * \brief Matrix square root
 *
 * Computed by functional calculus (qpp::funm()), i.e. by applying the
 * principal square root to each eigenvalue of \a A.
 *
 * \param A Eigen expression
 * \return Matrix square root of \a A
 */
template <typename Derived>
cmat sqrtm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::sqrtm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::sqrtm()");
    // END EXCEPTION CHECKS
    // wrap std::sqrt in a captureless lambda (implicitly converts to the
    // function pointer funm() expects): taking the address of a standard
    // library function (&std::sqrt) is unspecified behavior in C++
    return funm(rA, [](const cplx& z) -> cplx { return std::sqrt(z); });
}
/**
 * \brief Matrix absolute value
 *
 * \param A Eigen expression
 * \return Matrix absolute value of \a A
 */
template <typename Derived>
cmat absm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::absm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::absm()");
    // END EXCEPTION CHECKS

    // |A| = sqrt(A^\dagger A)
    return sqrtm(adjoint(mat) * mat);
}
/**
 * \brief Matrix exponential
 *
 * Computed by functional calculus (qpp::funm()), i.e. by exponentiating
 * each eigenvalue of \a A.
 *
 * \param A Eigen expression
 * \return Matrix exponential of \a A
 */
template <typename Derived>
cmat expm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::expm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::expm()");
    // END EXCEPTION CHECKS
    // wrap std::exp in a captureless lambda (implicitly converts to the
    // function pointer funm() expects): taking the address of a standard
    // library function (&std::exp) is unspecified behavior in C++
    return funm(rA, [](const cplx& z) -> cplx { return std::exp(z); });
}
/**
 * \brief Matrix logarithm
 *
 * Computed by functional calculus (qpp::funm()), i.e. by applying the
 * principal logarithm to each eigenvalue of \a A.
 *
 * \param A Eigen expression
 * \return Matrix logarithm of \a A
 */
template <typename Derived>
cmat logm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::logm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::logm()");
    // END EXCEPTION CHECKS
    // wrap std::log in a captureless lambda (implicitly converts to the
    // function pointer funm() expects): taking the address of a standard
    // library function (&std::log) is unspecified behavior in C++
    return funm(rA, [](const cplx& z) -> cplx { return std::log(z); });
}
/**
 * \brief Matrix sin
 *
 * Computed by functional calculus (qpp::funm()), i.e. by applying
 * the sine to each eigenvalue of \a A.
 *
 * \param A Eigen expression
 * \return Matrix sine of \a A
 */
template <typename Derived>
cmat sinm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::sinm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::sinm()");
    // END EXCEPTION CHECKS
    // wrap std::sin in a captureless lambda (implicitly converts to the
    // function pointer funm() expects): taking the address of a standard
    // library function (&std::sin) is unspecified behavior in C++
    return funm(rA, [](const cplx& z) -> cplx { return std::sin(z); });
}
/**
 * \brief Matrix cos
 *
 * Computed by functional calculus (qpp::funm()), i.e. by applying
 * the cosine to each eigenvalue of \a A.
 *
 * \param A Eigen expression
 * \return Matrix cosine of \a A
 */
template <typename Derived>
cmat cosm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::cosm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::cosm()");
    // END EXCEPTION CHECKS
    // wrap std::cos in a captureless lambda (implicitly converts to the
    // function pointer funm() expects): taking the address of a standard
    // library function (&std::cos) is unspecified behavior in C++
    return funm(rA, [](const cplx& z) -> cplx { return std::cos(z); });
}
/**
 * \brief Matrix power
 * \see qpp::powm()
 *
 * Uses the spectral decomposition of \a A to compute the matrix power.
 * By convention \f$A^0 = I\f$.
 *
 * \note As with qpp::funm(), the result is meaningful only for
 * diagonalizable matrices, since the eigenvector matrix is inverted
 *
 * \param A Eigen expression
 * \param z Complex number
 * \return Matrix power \f$A^z\f$
 */
template <typename Derived>
cmat spectralpowm(const Eigen::MatrixBase<Derived>& A, const cplx z) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::spectralpowm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::spectralpowm()");
    // END EXCEPTION CHECKS
    // Define A^0 = Id, for z IDENTICALLY zero
    if (real(z) == 0 && imag(z) == 0)
        return cmat::Identity(rA.rows(), rA.rows());
    // general (non-Hermitian) eigensolver; cast to complex first
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    // raise each eigenvalue to the power z (principal branch of std::pow)
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = std::pow(evals(i), z);
    cmat evalsdiag = evals.asDiagonal();
    // A^z = V D^z V^{-1}
    return evects * evalsdiag * evects.inverse();
}
/**
 * \brief Fast matrix power based on the SQUARE-AND-MULTIPLY algorithm
 * \see qpp::spectralpowm()
 *
 * Computes the matrix power \f$A^n\f$ by repeated squaring, using
 * \f$O(\log n)\f$ matrix multiplications (NOT by multiplying \a A
 * with itself \a n times).
 * By convention \f$A^0 = I\f$.
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Matrix power \f$A^n\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> powm(const Eigen::MatrixBase<Derived>& A,
                                       idx n) {
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::powm()");
    // check square matrix
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::powm()");
    // END EXCEPTION CHECKS
    // if n = 1, return the matrix unchanged
    if (n == 1)
        return A;
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Identity(A.rows(), A.rows());
    // if n = 0, return the identity (as just prepared in result)
    if (n == 0)
        return result;
    dyn_mat<typename Derived::Scalar> cA = A.derived(); // copy
    // fast matrix power: accumulate the squarings that correspond to
    // set bits of n; .eval() forces the product to avoid aliasing
    for (; n > 0; n /= 2) {
        if (n % 2)
            result = (result * cA).eval();
        cA = (cA * cA).eval();
    }
    return result;
}
/**
 * \brief Schatten matrix norm
 *
 * \param A Eigen expression
 * \param p Real number, greater or equal to 1,
 * use qpp::infty for \f$p = \infty\f$
 * \return Schatten-\a p matrix norm of \a A
 */
template <typename Derived>
double schatten(const Eigen::MatrixBase<Derived>& A, double p) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::schatten()");
    if (p < 1)
        throw exception::OutOfRange("qpp::schatten()");
    // END EXCEPTION CHECKS

    // Schatten-infinity norm = operator norm = largest singular value
    // (singular values come back sorted in decreasing order)
    if (p == infty)
        return svals(mat)(0);

    // otherwise, the p-norm of the singular value vector
    const dyn_col_vect<double> sigma = svals(mat);
    double acc = 0;
    for (idx k = 0; k < static_cast<idx>(sigma.rows()); ++k)
        acc += std::pow(sigma[k], p);

    return std::pow(acc, 1. / p);
}
// other functions
/**
 * \brief Functor
 *
 * Applies \a f element-wise to \a A
 *
 * \param A Eigen expression
 * \param f Pointer-to-function from scalars of \a A to \a OutputScalar
 * \return Component-wise \f$f(A)\f$, as a dynamic matrix
 * over the \a OutputScalar scalar field
 */
template <typename OutputScalar, typename Derived>
dyn_mat<OutputScalar>
cwise(const Eigen::MatrixBase<Derived>& A,
      OutputScalar (*f)(const typename Derived::Scalar&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::cwise()");
    // END EXCEPTION CHECKS
    dyn_mat<OutputScalar> result(rA.rows(), rA.cols());
// NOTE: the pragma binds to the nested loop below; iterations write
// disjoint entries of result, so the collapsed parallel loop is race-free
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    // column major order for speed
    for (idx j = 0; j < static_cast<idx>(rA.cols()); ++j)
        for (idx i = 0; i < static_cast<idx>(rA.rows()); ++i)
            result(i, j) = (*f)(rA(i, j));
    return result;
}
// Kronecker product of multiple matrices, preserve return type
// variadic template
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * Used to stop the recursion for the variadic template version of
 * qpp::kron()
 *
 * \param head Eigen expression
 * \return Its argument \a head
 */
template <typename T>
dyn_mat<typename T::Scalar> kron(const T& head) {
    // single factor: the Kronecker product is the matrix itself
    return head;
}
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param head Eigen expression
 * \param tail Variadic Eigen expression (zero or more parameters)
 * \return Kronecker product of all input parameters,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> kron(const T& head, const Args&... tail) {
    // recursive right-fold over the parameter pack; since the Kronecker
    // product is associative, this equals left-to-right evaluation
    return internal::kron2(head, kron(tail...));
}
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param As std::vector of Eigen expressions
 * \return Kronecker product of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> kron(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    if (As.size() == 0)
        throw exception::ZeroSize("qpp::kron()");
    for (auto&& elem : As)
        if (!internal::check_nonzero_size(elem))
            throw exception::ZeroSize("qpp::kron()");
    // END EXCEPTION CHECKS

    // left-fold the pairwise Kronecker product over the list
    dyn_mat<typename Derived::Scalar> acc = As[0].derived();
    for (std::size_t k = 1; k < As.size(); ++k)
        acc = kron(acc, As[k]);

    return acc;
}
// Kronecker product of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param As std::initializer_list of Eigen expressions,
 * such as \a {A1, A2, ... ,Ak}
 * \return Kronecker product of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
kron(const std::initializer_list<Derived>& As) {
    // delegate to the std::vector overload
    return kron(std::vector<Derived>(As));
}
/**
 * \brief Kronecker power
 * \see qpp::kron()
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Kronecker product of \a A with itself \a n times \f$A^{\otimes n}\f$,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> kronpow(const Eigen::MatrixBase<Derived>& A,
                                          idx n) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::kronpow()");
    if (n == 0)
        throw exception::OutOfRange("qpp::kronpow()");
    // END EXCEPTION CHECKS

    // delegate to the list overload with n copies of A
    return kron(std::vector<dyn_mat<typename Derived::Scalar>>(n, mat));
}
// Direct sum of multiple matrices, preserve return type
// variadic template
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * Used to stop the recursion for the variadic template version of
 * qpp::dirsum()
 *
 * \param head Eigen expression
 * \return Its argument \a head
 */
template <typename T>
dyn_mat<typename T::Scalar> dirsum(const T& head) {
    // single summand: the direct sum is the matrix itself
    return head;
}
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param head Eigen expression
 * \param tail Variadic Eigen expression (zero or more parameters)
 * \return Direct sum of all input parameters,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> dirsum(const T& head, const Args&... tail) {
    // recursive right-fold over the parameter pack; since the direct sum
    // is associative, this equals left-to-right evaluation
    return internal::dirsum2(head, dirsum(tail...));
}
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param As std::vector of Eigen expressions
 * \return Direct sum of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsum(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    if (As.size() == 0)
        throw exception::ZeroSize("qpp::dirsum()");
    for (auto&& elem : As)
        if (!internal::check_nonzero_size(elem))
            throw exception::ZeroSize("qpp::dirsum()");
    // END EXCEPTION CHECKS

    // the result is block-diagonal: first sum up all row/column counts
    idx total_rows = 0, total_cols = 0;
    for (auto&& elem : As) {
        total_rows += static_cast<idx>(elem.rows());
        total_cols += static_cast<idx>(elem.cols());
    }

    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Zero(total_rows, total_cols);

    // place each summand on the diagonal, advancing the offsets as we go
    idx row_off = 0, col_off = 0;
    for (auto&& elem : As) {
        result.block(row_off, col_off, elem.rows(), elem.cols()) = elem;
        row_off += static_cast<idx>(elem.rows());
        col_off += static_cast<idx>(elem.cols());
    }

    return result;
}
// Direct sum of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param As std::initializer_list of Eigen expressions,
 * such as \a {A1, A2, ... ,Ak}
 * \return Direct sum of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
dirsum(const std::initializer_list<Derived>& As) {
    // delegate to the std::vector overload
    return dirsum(std::vector<Derived>(As));
}
/**
 * \brief Direct sum power
 * \see qpp::dirsum()
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Direct sum of \a A with itself \a n times \f$A^{\oplus n}\f$,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsumpow(const Eigen::MatrixBase<Derived>& A,
                                            idx n) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::dirsumpow()");
    if (n == 0)
        throw exception::OutOfRange("qpp::dirsumpow()");
    // END EXCEPTION CHECKS

    // delegate to the list overload with n copies of A
    return dirsum(std::vector<dyn_mat<typename Derived::Scalar>>(n, mat));
}
/**
 * \brief Reshape
 *
 * Uses column-major order when reshaping (same as MATLAB)
 *
 * \param A Eigen expression
 * \param rows Number of rows of the reshaped matrix
 * \param cols Number of columns of the reshaped matrix
 * \return Reshaped matrix with \a rows rows and \a cols columns,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> reshape(const Eigen::MatrixBase<Derived>& A,
                                          idx rows, idx cols) {
    // if A is an expression, rA binds to a materialized temporary whose
    // lifetime extends to the end of this function
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    idx Arows = static_cast<idx>(rA.rows());
    idx Acols = static_cast<idx>(rA.cols());
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::reshape()");
    // total number of elements must be preserved
    if (Arows * Acols != rows * cols)
        throw exception::DimsMismatchMatrix("qpp::reshape()");
    // END EXCEPTION CHECKS
    // view the raw (column-major) storage with the new dimensions; the
    // const_cast only removes constness for Map's pointer type, and the
    // data is copied into the returned matrix, never mutated in place
    return Eigen::Map<dyn_mat<typename Derived::Scalar>>(
        const_cast<typename Derived::Scalar*>(rA.data()), rows, cols);
}
/**
 * \brief Commutator
 * \see qpp::anticomm()
 *
 * Commutator \f$ [A,B] = AB - BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Commutator \f$AB -BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> comm(const Eigen::MatrixBase<Derived1>& A,
                                        const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& lhs = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rhs = B.derived();

    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::comm()");
    if (!internal::check_nonzero_size(lhs) ||
        !internal::check_nonzero_size(rhs))
        throw exception::ZeroSize("qpp::comm()");
    if (!internal::check_square_mat(lhs) || !internal::check_square_mat(rhs))
        throw exception::MatrixNotSquare("qpp::comm()");
    if (lhs.rows() != rhs.rows())
        throw exception::DimsNotEqual("qpp::comm()");
    // END EXCEPTION CHECKS

    return lhs * rhs - rhs * lhs;
}
/**
 * \brief Anti-commutator
 * \see qpp::comm()
 *
 * Anti-commutator \f$ \{A,B\} = AB + BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Anti-commutator \f$AB +BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
anticomm(const Eigen::MatrixBase<Derived1>& A,
         const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& lhs = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rhs = B.derived();

    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::anticomm()");
    if (!internal::check_nonzero_size(lhs) ||
        !internal::check_nonzero_size(rhs))
        throw exception::ZeroSize("qpp::anticomm()");
    if (!internal::check_square_mat(lhs) || !internal::check_square_mat(rhs))
        throw exception::MatrixNotSquare("qpp::anticomm()");
    if (lhs.rows() != rhs.rows())
        throw exception::DimsNotEqual("qpp::anticomm()");
    // END EXCEPTION CHECKS

    return lhs * rhs + rhs * lhs;
}
/**
 * \brief Projector
 *
 * Normalized projector onto state vector
 *
 * \param A Eigen expression
 * \return Projector onto the state vector \a A, or the matrix \a Zero if \a A
 * has norm zero, as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> prj(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& vec = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(vec))
        throw exception::ZeroSize("qpp::prj()");
    if (!internal::check_cvector(vec))
        throw exception::MatrixNotCvector("qpp::prj()");
    // END EXCEPTION CHECKS

    const double nrm = norm(vec);
    // the zero vector has no associated projector: return the zero matrix
    if (!(nrm > 0))
        return dyn_mat<typename Derived::Scalar>::Zero(vec.rows(), vec.rows());

    // |v><v| / <v|v>
    return vec * adjoint(vec) / (nrm * nrm);
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * Zero input vectors are skipped; the output therefore may contain
 * fewer columns than \a As has elements
 *
 * \param As std::vector of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    // check empty list
    if (!internal::check_nonzero_size(As))
        throw exception::ZeroSize("qpp::grams()");
    for (auto&& elem : As)
        if (!internal::check_nonzero_size(elem))
            throw exception::ZeroSize("qpp::grams()");
    // check that As[0] is a column vector
    if (!internal::check_cvector(As[0]))
        throw exception::MatrixNotCvector("qpp::grams()");
    // now check that all the rest match the size of the first vector
    for (auto&& elem : As)
        if (elem.rows() != As[0].rows() || elem.cols() != 1)
            throw exception::DimsNotEqual("qpp::grams()");
    // END EXCEPTION CHECKS
    // cut starts as the identity and accumulates (I - sum of projectors
    // onto the vectors emitted so far), i.e. the deflation operator
    dyn_mat<typename Derived::Scalar> cut =
        dyn_mat<typename Derived::Scalar>::Identity(As[0].rows(), As[0].rows());
    dyn_mat<typename Derived::Scalar> vi =
        dyn_mat<typename Derived::Scalar>::Zero(As[0].rows(), 1);
    std::vector<dyn_mat<typename Derived::Scalar>> outvecs;
    // find the first non-zero vector in the list
    idx pos = 0;
    for (pos = 0; pos < As.size(); ++pos) {
        if (norm(As[pos]) > 0) // add it as the first element
        {
            outvecs.emplace_back(As[pos]);
            break;
        }
    }
    // start the process
    for (idx i = pos + 1; i < As.size(); ++i) {
        // subtract the projector onto the most recently emitted vector,
        // so cut now removes the span of all vectors emitted so far
        cut -= prj(outvecs[i - 1 - pos]);
        // component of As[i] orthogonal to that span
        vi = cut * As[i];
        outvecs.emplace_back(vi);
    }
    // normalize the surviving vectors into the output columns
    dyn_mat<typename Derived::Scalar> result(As[0].rows(), outvecs.size());
    idx cnt = 0;
    for (auto&& elem : outvecs) {
        double normA = norm(elem);
        if (normA > 0) // we add only the non-zero vectors
        {
            result.col(cnt) = elem / normA;
            ++cnt;
        }
    }
    // shrink to the number of columns actually filled
    return result.block(0, 0, As[0].rows(), cnt);
}
// deduce the template parameters from initializer_list
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param As std::initializer_list of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
grams(const std::initializer_list<Derived>& As) {
    // delegate to the std::vector overload
    return grams(std::vector<Derived>(As));
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param A Eigen expression, the input vectors are the columns of \a A
 * \return Gram-Schmidt vectors of the columns of \a A,
 * as columns of a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::grams()");
    // END EXCEPTION CHECKS

    // split A into its columns and delegate to the std::vector overload
    std::vector<dyn_mat<typename Derived::Scalar>> cols;
    cols.reserve(static_cast<std::size_t>(mat.cols()));
    for (idx c = 0; c < static_cast<idx>(mat.cols()); ++c)
        cols.emplace_back(mat.col(c));

    return grams<dyn_mat<typename Derived::Scalar>>(cols);
}
/**
 * \brief Non-negative integer index to multi-index
 * \see qpp::multiidx2n()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param n Non-negative integer index
 * \param dims Dimensions of the multi-partite system
 * \return Multi-index of the same size as \a dims
 */
inline std::vector<idx> n2multiidx(idx n, const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::n2multiidx()");
    // n must be strictly smaller than prod(dims)
    if (n >= std::accumulate(std::begin(dims), std::end(dims),
                             static_cast<idx>(1), std::multiplies<idx>()))
        throw exception::OutOfRange("qpp::n2multiidx()");
    // END EXCEPTION CHECKS
    // double the size for matrices reshaped as vectors
    // NOTE(review): fixed-size scratch buffer; presumably check_dims()
    // bounds dims.size() by maxn — confirm against internal::check_dims
    idx result[2 * maxn];
    internal::n2multiidx(n, dims.size(), dims.data(), result);
    // copy out only the first dims.size() digits
    return std::vector<idx>(result, result + dims.size());
}
/**
 * \brief Multi-index to non-negative integer index
 * \see qpp::n2multiidx()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param midx Multi-index, must have the same size as \a dims
 * \param dims Dimensions of the multi-partite system
 * \return Non-negative integer index
 */
inline idx multiidx2n(const std::vector<idx>& midx,
                      const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::multiidx2n()");
    // midx must pair up element-wise with dims; checking the sizes first
    // also protects the range check below from reading midx out of bounds
    if (midx.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::multiidx2n()");
    // each digit must be strictly smaller than its local dimension
    for (idx i = 0; i < dims.size(); ++i)
        if (midx[i] >= dims[i])
            throw exception::OutOfRange("qpp::multiidx2n()");
    // END EXCEPTION CHECKS
    return internal::multiidx2n(midx.data(), dims.size(), dims.data());
}
/**
 * \brief Multi-partite qudit ket
 * \see qpp::operator "" _ket()
 *
 *
 * Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * where \a mask is a std::vector of non-negative integers.
 * Each element in \a mask has to be smaller than the corresponding element
 * in \a dims.
 *
 * \param mask std::vector of non-negative integers
 * \param dims Dimensions of the multi-partite system
 * \return Multi-partite qudit state vector, as a complex dynamic column vector
 */
inline ket mket(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    idx n = mask.size();
    // total Hilbert-space dimension: product of all local dimensions
    idx D = std::accumulate(std::begin(dims), std::end(dims),
                            static_cast<idx>(1), std::multiplies<idx>());

    // EXCEPTION CHECKS
    if (n == 0)
        throw exception::ZeroSize("qpp::mket()");
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mket()");
    if (mask.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::mket()");
    for (idx k = 0; k < n; ++k)
        if (mask[k] >= dims[k])
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS

    // basis ket: every amplitude zero except the one indexed by mask
    ket psi = ket::Zero(D);
    psi(multiidx2n(mask, dims)) = 1;

    return psi;
}
/**
 * \brief Multi-partite qudit ket
 * \see qpp::operator "" _ket()
 *
 * Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * all subsystem having equal dimension \a d.
 * \a mask is a std::vector of non-negative integers, and
 * each element in \a mask has to be strictly smaller than \a d.
 *
 * \param mask std::vector of non-negative integers
 * \param d Subsystem dimensions
 * \return Multi-partite qudit state vector, as a complex dynamic column vector
 */
inline ket mket(const std::vector<idx>& mask, idx d = 2) {
    idx n = mask.size();
    // total Hilbert-space dimension D = d^n (rounded from floating point)
    idx D = static_cast<idx>(std::llround(std::pow(d, n)));

    // EXCEPTION CHECKS
    if (n == 0)
        throw exception::ZeroSize("qpp::mket()");
    if (d == 0)
        throw exception::DimsInvalid("qpp::mket()");
    for (idx k = 0; k < n; ++k)
        if (mask[k] >= d)
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS

    // basis ket: every amplitude zero except the one indexed by mask
    ket psi = ket::Zero(D);
    psi(multiidx2n(mask, std::vector<idx>(n, d))) = 1;

    return psi;
}
/**
 * \brief Projector onto multi-partite qudit ket
 * \see qpp::operator "" _prj()
 *
 * Constructs the projector onto the multi-partite qudit ket
 * \f$|\mathrm{mask}\rangle\f$,
 * where \a mask is a std::vector of non-negative integers.
 * Each element in \a mask has to be smaller than the corresponding element
 * in \a dims.
 *
 * \param mask std::vector of non-negative integers
 * \param dims Dimensions of the multi-partite system
 * \return Projector onto multi-partite qudit state vector,
 * as a complex dynamic matrix
 */
inline cmat mprj(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    idx n = mask.size();
    // total Hilbert-space dimension: product of all local dimensions
    idx D = std::accumulate(std::begin(dims), std::end(dims),
                            static_cast<idx>(1), std::multiplies<idx>());

    // EXCEPTION CHECKS
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mprj()");
    if (mask.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::mprj()");
    for (idx k = 0; k < n; ++k)
        if (mask[k] >= dims[k])
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS

    // rank-one projector: a single 1 on the diagonal at the mask index
    cmat P = cmat::Zero(D, D);
    idx position = multiidx2n(mask, dims);
    P(position, position) = 1;

    return P;
}
/**
 * \brief Projector onto multi-partite qudit ket
 * \see qpp::operator "" _prj()
 *
 * Constructs the projector onto the multi-partite qudit ket
 * \f$|\mathrm{mask}\rangle\f$,
 * all subsystem having equal dimension \a d.
 * \a mask is a std::vector of non-negative integers, and
 * each element in \a mask has to be strictly smaller than \a d.
 *
 * \param mask std::vector of non-negative integers
 * \param d Subsystem dimensions
 * \return Projector onto multi-partite qudit state vector,
 * as a complex dynamic matrix
 */
inline cmat mprj(const std::vector<idx>& mask, idx d = 2) {
    idx n = mask.size();
    // total Hilbert-space dimension D = d^n (rounded from floating point)
    idx D = static_cast<idx>(std::llround(std::pow(d, n)));

    // EXCEPTION CHECKS
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    if (d == 0)
        throw exception::DimsInvalid("qpp::mprj()");
    for (idx k = 0; k < n; ++k)
        if (mask[k] >= d)
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS

    // rank-one projector: a single 1 on the diagonal at the mask index
    cmat P = cmat::Zero(D, D);
    idx position = multiidx2n(mask, std::vector<idx>(n, d));
    P(position, position) = 1;

    return P;
}
/**
* \brief Computes the absolute values squared of an STL-like range
* of complex numbers
* \param first Iterator to the first element of the range
* \param last Iterator to the last element of the range
* \return Real vector consisting of the range absolute values squared
*/
template <typename InputIterator>
std::vector<double> abssq(InputIterator first, InputIterator last) {
std::vector<double> weights(std::distance(first, last));
std::transform(first, last, std::begin(weights),
[](cplx z) -> double { return std::norm(z); });
return weights;
}
/**
 * \brief Computes the absolute values squared of an STL-like container
 *
 * \param c STL-like container
 * \return Real vector consisting of the container's absolute values squared
 */
template <typename Container>
std::vector<double>
abssq(const Container& c,
      typename std::enable_if<is_iterable<Container>::value>::type* = nullptr)
// we need the std::enable_if to SFINAE out Eigen expressions
// that will otherwise match, instead of matching
// the overload below:
// template<typename Derived>
// abssq(const Eigen::MatrixBase<Derived>& A)
{
    // delegate to the iterator-pair overload
    return abssq(std::begin(c), std::end(c));
}
/**
 * \brief Computes the absolute values squared of an Eigen expression
 * \param A Eigen expression
 * \return Real vector consisting of the absolute values squared
 */
template <typename Derived>
std::vector<double> abssq(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::abssq()");
    // END EXCEPTION CHECKS

    // iterate directly over the underlying contiguous storage
    const typename Derived::Scalar* data = mat.data();
    return abssq(data, data + mat.size());
}
/**
 * \brief Element-wise sum of an STL-like range
 *
 * \param first Iterator to the first element of the range
 * \param last Iterator to the last element of the range
 * \return Element-wise sum of the range,
 * as a scalar over the same scalar field as the range
 */
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
sum(InputIterator first, InputIterator last) {
    using value_type = typename std::iterator_traits<InputIterator>::value_type;

    // manual fold, starting from zero of the range's scalar type
    value_type acc = static_cast<value_type>(0);
    for (; first != last; ++first)
        acc = acc + *first;

    return acc;
}
/**
 * \brief Element-wise sum of the elements of an STL-like container
 *
 * \param c STL-like container
 * \return Element-wise sum of the elements of the container,
 * as a scalar over the same scalar field as the container
 */
template <typename Container>
typename Container::value_type
sum(const Container& c,
    // SFINAE: restrict to iterable containers so Eigen expressions
    // do not match this overload
    typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-pair overload
    return sum(std::begin(c), std::end(c));
}
/**
 * \brief Element-wise product of an STL-like range
 *
 * \param first Iterator to the first element of the range
 * \param last Iterator to the last element of the range
 * \return Element-wise product of the range,
 * as a scalar over the same scalar field as the range
 */
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
prod(InputIterator first, InputIterator last) {
    using value_type = typename std::iterator_traits<InputIterator>::value_type;

    // manual fold, starting from one of the range's scalar type
    value_type acc = static_cast<value_type>(1);
    for (; first != last; ++first)
        acc = acc * *first;

    return acc;
}
/**
 * \brief Element-wise product of the elements of an STL-like container
 *
 * \param c STL-like container
 * \return Element-wise product of the elements of the container,
 * as a scalar over the same scalar field as the container
 */
template <typename Container>
typename Container::value_type
prod(const Container& c,
     // SFINAE: restrict to iterable containers so Eigen expressions
     // do not match this overload
     typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-pair overload
    return prod(std::begin(c), std::end(c));
}
/**
 * \brief Finds the pure state representation of a matrix
 * proportional to a projector onto a pure state
 *
 * \note No purity check is done, the input state \a A must have rank one,
 * otherwise the function returns the first non-zero eigenvector of \a A
 *
 * \note Uses a Hermitian eigendecomposition (hevals()/hevects()), so
 * \a A is implicitly assumed Hermitian
 *
 * \param A Eigen expression, assumed to be proportional
 * to a projector onto a pure state, i.e. \a A is assumed to have rank one
 * \return The unique non-zero eigenvector of \a A (up to a phase),
 * as a dynamic column vector over the same scalar field as \a A
 */
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
rho2pure(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::rho2pure()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::rho2pure()");
    // END EXCEPTION CHECKS
    dyn_col_vect<double> tmp_evals = hevals(rA);
    cmat tmp_evects = hevects(rA);
    // fall back to the zero vector if no non-zero eigenvalue is found
    dyn_col_vect<typename Derived::Scalar> result =
        dyn_col_vect<typename Derived::Scalar>::Zero(rA.rows());
    // find the non-zero eigenvector
    // there is only one, assuming the state is pure
    for (idx k = 0; k < static_cast<idx>(rA.rows()); ++k) {
        if (std::abs(tmp_evals(k)) > 0) {
            result = tmp_evects.col(k);
            break;
        }
    }
    return result;
}
/**
* \brief Constructs the complement of a subsystem vector
*
* \param subsys Subsystem vector
* \param n Total number of systems
* \return Complement of \a subsys with respect to the set
* \f$\{0, 1, \ldots, n - 1\}\f$
*/
inline std::vector<idx> complement(std::vector<idx> subsys, idx n) {
// EXCEPTION CHECKS
if (n < subsys.size())
throw exception::OutOfRange("qpp::complement()");
for (idx i = 0; i < subsys.size(); ++i)
if (subsys[i] >= n)
throw exception::SubsysMismatchDims("qpp::complement()");
// END EXCEPTION CHECKS
std::vector<idx> all(n);
std::vector<idx> subsys_bar(n - subsys.size());
std::iota(std::begin(all), std::end(all), 0);
std::sort(std::begin(subsys), std::end(subsys));
std::set_difference(std::begin(all), std::end(all), std::begin(subsys),
std::end(subsys), std::begin(subsys_bar));
return subsys_bar;
}
/**
* \brief Computes the 3-dimensional real Bloch vector
* corresponding to the qubit density matrix \a A
* \see qpp::bloch2rho()
*
* \note It is implicitly assumed that the density matrix is Hermitian
*
* \param A Eigen expression
* \return 3-dimensional Bloch vector
*/
template <typename Derived>
std::vector<double> rho2bloch(const Eigen::MatrixBase<Derived>& A) {
const dyn_mat<typename Derived::Scalar>& rA = A.derived();
// EXCEPTION CHECKS
// check qubit matrix
if (!internal::check_qubit_matrix(rA))
throw exception::NotQubitMatrix("qpp::rho2bloch()");
// END EXCEPTION CHECKS
std::vector<double> result(3);
// the three Pauli matrices; 1_i is qpp's imaginary-unit literal
cmat X(2, 2), Y(2, 2), Z(2, 2);
X << 0, 1, 1, 0;
Y << 0, -1_i, 1_i, 0;
Z << 1, 0, 0, -1;
// Bloch components r_k = Re(Tr(rho * sigma_k)); the real part is taken
// since the matrix is only implicitly assumed Hermitian (see \note)
result[0] = std::real(trace(rA * X));
result[1] = std::real(trace(rA * Y));
result[2] = std::real(trace(rA * Z));
return result;
}
/**
* \brief Computes the density matrix corresponding to
* the 3-dimensional real Bloch vector \a r
* \see qpp::rho2bloch()
*
* \param r 3-dimensional real vector
* \return Qubit density matrix
*/
// Builds the qubit density matrix rho = (I + r . sigma) / 2 from a
// 3-dimensional real Bloch vector r, sigma being the Pauli vector.
inline cmat bloch2rho(const std::vector<double>& r) {
    // EXCEPTION CHECKS
    // a Bloch vector must have exactly 3 real components
    if (r.size() != 3)
        throw exception::CustomException("qpp::bloch2rho",
                                         "r is not a 3-dimensional vector!");
    // END EXCEPTION CHECKS
    cmat sigma_x(2, 2), sigma_y(2, 2), sigma_z(2, 2), eye2(2, 2);
    sigma_x << 0, 1, 1, 0;
    sigma_y << 0, -1_i, 1_i, 0;
    sigma_z << 1, 0, 0, -1;
    eye2 << 1, 0, 0, 1;
    return (eye2 + r[0] * sigma_x + r[1] * sigma_y + r[2] * sigma_z) / 2.;
}
inline namespace literals {
// Idea taken from http://techblog.altplus.co.jp/entry/2017/11/08/130921
/**
* \brief Multi-partite qubit ket user-defined literal
* \see qpp::mket()
*
* Constructs the multi-partite qubit ket \f$|\mathrm{Bits}\rangle\f$
*
* \tparam Bits String of binary numbers representing the qubit ket
* \return Multi-partite qubit ket, as a complex dynamic column vector
*/
// Constructs |Bits> in the computational basis. Validation happens before
// the ket is allocated; the dimension 2^n and the basis index are computed
// with exact integer arithmetic.
template <char... Bits>
ket operator"" _ket() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // EXCEPTION CHECKS
    // check valid multi-partite qubit state
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _ket())xxx");
    }
    // END EXCEPTION CHECKS
    // 2^n via bit shift (exact), instead of the previous std::pow/llround
    // round-trip through floating point
    qpp::ket q = qpp::ket::Zero(static_cast<idx>(1) << n);
    // basis index of |bits>, MSB first; replaces std::stoi, which is
    // limited to int and would overflow (or throw) for n > 31
    idx pos = 0;
    for (idx i = 0; i < n; ++i)
        pos = (pos << 1) | static_cast<idx>(bits[i] - '0');
    q(pos) = 1;
    return q;
}
/**
* \brief Multi-partite qubit bra user-defined literal
* \see qpp::mket() and qpp::adjoint()
*
* Constructs the multi-partite qubit bra \f$\langle\mathrm{Bits}|\f$
*
* \tparam Bits String of binary numbers representing the qubit bra
* \return Multi-partite qubit bra, as a complex dynamic row vector
*/
// Constructs <Bits| in the computational basis. Same integer-exact
// construction as operator""_ket (no std::pow, no std::stoi int overflow
// for n > 31); validation precedes allocation.
template <char... Bits>
bra operator"" _bra() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // EXCEPTION CHECKS
    // check valid multi-partite qubit state
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _bra())xxx");
    }
    // END EXCEPTION CHECKS
    // assigning a column Zero to a row vector relies on Eigen's vector
    // resizeLike semantics (kept from the original) -- TODO confirm
    qpp::bra q = qpp::ket::Zero(static_cast<idx>(1) << n);
    idx pos = 0;
    for (idx i = 0; i < n; ++i)
        pos = (pos << 1) | static_cast<idx>(bits[i] - '0');
    q(pos) = 1;
    return q;
}
/**
* \brief Multi-partite qubit projector user-defined literal
* \see qpp::mprj()
*
* Constructs the multi-partite qubit projector
* \f$|\mathrm{Bits}\rangle\langle\mathrm{Bits}|\f$ (in the computational basis)
*
* \tparam Bits String of binary numbers representing the qubit state
* to project on
* \return Multi-partite qubit projector, as a complex dynamic matrix
*/
// Builds the projector |Bits><Bits| as the Kronecker product of the ket
// and bra literals for the same bit string.
template <char... Bits>
cmat operator"" _prj() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // EXCEPTION CHECKS
    // every character must be a binary digit
    for (idx i = 0; i < n; ++i) {
        if (bits[i] == '0' || bits[i] == '1')
            continue;
        throw exception::OutOfRange(R"xxx(qpp::operator "" _prj())xxx");
    }
    // END EXCEPTION CHECKS
    return kron(operator""_ket<Bits...>(), operator""_bra<Bits...>());
}
} /* namespace literals */
namespace internal {
// hash combine, code taken from boost::hash_combine(), see
// https://www.boost.org/doc/libs/1_69_0/doc/html/hash/reference.html#boost.hash_combine
// Folds the hash of v into seed, boost::hash_combine-style: the magic
// constant is the 32-bit golden ratio, the shifts spread entropy.
template <class T>
void hash_combine(std::size_t& seed, const T& v) {
    const std::size_t h = std::hash<T>{}(v);
    seed ^= h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
} /* namespace internal */
/**
* \brief Computes the hash of an Eigen matrix/vector/expression
* \note Code taken from boost::hash_combine(), see
* https://www.boost.org/doc/libs/1_69_0/doc/html/hash/reference.html#boost.hash_combine
*
* \param A Eigen expression
* \param seed Seed, 0 by default
* \return Hash of its argument
*/
template <typename Derived>
std::size_t hash_eigen(const Eigen::MatrixBase<Derived>& A,
std::size_t seed = 0) {
// materializes the expression into a dense matrix so .data() below is a
// plain contiguous coefficient array
const dyn_mat<typename Derived::Scalar>& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::hash_eigen()");
// END EXCEPTION CHECKS
auto* p = rA.data();
idx sizeA = static_cast<idx>(rA.size());
// combine real and imaginary parts of every coefficient; for real
// scalars std::imag() is 0, which still feeds the hash deterministically
for (idx i = 0; i < sizeA; ++i) {
internal::hash_combine(seed, std::real(p[i]));
internal::hash_combine(seed, std::imag(p[i]));
}
return seed;
}
namespace internal {
/**
* \class qpp::internal::HashEigen
* \brief Functor for hashing Eigen expressions
*/
struct HashEigen {
    // Evaluates the expression to a dense matrix, then delegates to
    // hash_eigen() with its default seed of 0.
    template <typename Derived>
    std::size_t operator()(const Eigen::MatrixBase<Derived>& A) const {
        const dyn_mat<typename Derived::Scalar>& mat = A.derived();
        return hash_eigen(mat);
    }
};
/**
* \class qpp::internal::EqualEigen
* \brief Functor for comparing Eigen expressions for equality
* \note Works without assertion fails even if the dimensions of the arguments
* \note Works without assertion fails even if the dimensions of the arguments
* are different (in which case it simply returns false)
struct EqualEigen {
    // Returns true iff A and B have the same dimensions and equal
    // coefficients. The dimension check must come first so Eigen's
    // coefficient-wise comparison is never evaluated on mismatched shapes.
    template <typename Derived>
    bool operator()(const Eigen::MatrixBase<Derived>& A,
                    const Eigen::MatrixBase<Derived>& B) const {
        const dyn_mat<typename Derived::Scalar>& rA = A.derived();
        const dyn_mat<typename Derived::Scalar>& rB = B.derived();
        if (rA.rows() != rB.rows() || rA.cols() != rB.cols())
            return false;
        // redundant "? true : false" removed -- the comparison already
        // yields bool
        return rA == rB;
    }
};
} /* namespace internal */
} /* namespace qpp */
#endif /* FUNCTIONS_H_ */
|
Parallel.c | /** This Program uses Parallel construct clause variables like Private, firstprivate, shared, default, copyin and Reduction.
* Sai Suraj
* 07/09/2021
**/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int a = 12345;
#pragma omp threadprivate(a)
/* Demonstrates the OpenMP data-sharing clauses -- private, firstprivate,
 * shared, default, copyin and reduction -- one parallel region per clause.
 * All output goes to stdout; always returns 0. */
int main(int argc, char* argv[])
{
    omp_set_num_threads(4);//set number of threads to 4

    /* private(Number): each thread gets its own *uninitialized* copy, so
     * the first printf inside the region prints an indeterminate value
     * (intentional for the demo); the original Number is untouched. */
    int Number = 246810;
    printf("\n\nPrivate Clause: \n\n\tValue Before Parallel Private Region : %d.\n", Number);
#pragma omp parallel private(Number)
    {
        printf("Thread %d : \"val\" = %d.\n", omp_get_thread_num(), Number);
        Number = omp_get_thread_num();
    }
    // Value after the parallel region; unchanged.
    printf("\tValue After Parallel Private Region : %d.\n", Number);

    /* firstprivate(Number): like private, but every thread's copy starts
     * with the value Number had on entry to the region. */
    printf("\n\nFirstPrivate Clause: \n\n\tValue Before Parallel FirstPrivate Region : %d.\n", Number);
#pragma omp parallel firstprivate(Number)
    {
        printf("Thread %d : \"val\" = %d.\n", omp_get_thread_num(), Number);
        Number = omp_get_thread_num();
    }
    // Value after the parallel region; unchanged.
    printf("\tValue After Parallel FirstPrivate Region %d.\n", Number);

    /* shared(Number): all threads reference the same variable; whether the
     * master's write is seen by the second printf of another thread
     * depends on timing. */
    Number = 555;
    printf("\n\nShared Clause: \n\n\tValue Before Parallel Shared Region : %d.\n", Number);
#pragma omp parallel shared(Number)
    {
        printf("Thread %d : Value is %d.\n",omp_get_thread_num(), Number);
#pragma omp master
        {
            printf("Thread %d : Changed value to 978.\n",omp_get_thread_num());
            Number = 978;
        }
        printf("Thread %d : Value is %d.\n",omp_get_thread_num(), Number);
    }
    printf("After Parallel Region : Value is %d.\n", Number);

    /* default(shared): variables not listed in any clause are shared.
     * NOTE: threads 0 and 1 both write val without synchronization -- a
     * data race kept deliberately small for demonstration purposes. */
    int val = 0;
    printf("\n\nDefault Clause:\n");
#pragma omp parallel default(shared)
    {
        if(omp_get_thread_num() <= 1)
        {
            /* message fixed: the value actually stored is 12345, not 123 */
            printf("Thread %d sets the value of \"val\" to 12345.\n",omp_get_thread_num());
            val = 12345;
        }
        printf("Thread %d reads the value of \"val\": %d.\n", omp_get_thread_num(),val);
    }

    /* copyin(a): the threadprivate global a is broadcast from the master
     * thread's copy into every thread's copy on entry to each region, so
     * the master's 67890 from the first region reappears everywhere in the
     * second region. */
    printf("\n\nCopyin Clause: \n\tValue Before Parallel Copyin Region : %d.\n", a);
#pragma omp parallel copyin(a)
    {
#pragma omp master
        {
            printf("First parallel region: Master thread value to 67890.\n");
            a = 67890;
        }
        printf("First parallel region: Thread %d: Value = %d.\n", omp_get_thread_num(), a);
    }
#pragma omp parallel copyin(a)
    {
        printf("Second parallel region: Thread %d: Value = %d.\n", omp_get_thread_num(), a);
    }

    // Use 4 threads when creating OpenMP parallel regions
    // (comment fixed: the call below requests 4 threads, not 2)
    omp_set_num_threads(4);

    /* reduction(+: Sum): each thread accumulates its private Sum, the
     * partial sums are combined on exit; computes 1 + 2 + ... + Value. */
    int Value = 10;
    int Sum = 0;
#pragma omp parallel for default(none) firstprivate(Value) reduction(+: Sum)
    for (int i = 0; i < Value; i++)
    {
        Sum += (i+1);
    }
    printf("\nSum of %d = %d\n\n", Value, Sum);
    return 0;
}
|
GB_binop__bxnor_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int64)
// C=scalar+B GB (_bind1st__bxnor_int64)
// C=scalar+B' GB (_bind1st_tran__bxnor_int64)
// C=A+scalar GB (_bind2nd__bxnor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled kernel: BXNOR is not one of the accumulating dense ewise3 ops,
// so the generator emitted the "(none)" placeholder under #if 0.
// Auto-generated file -- do not hand-edit.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A and B all dense; the numeric loop comes from the
// included template, specialized by the GB_* macros defined above.
// Auto-generated file -- do not hand-edit.
void GB (_Cdense_ewise3_noaccum__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, sliced by B_ek_slicing.
// Returns GrB_NO_VALUE when the operator is compiled out via GB_DISABLE.
// Auto-generated file -- do not hand-edit.
GrB_Info GB (_Cdense_accumB__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into a dense matrix).
// Auto-generated file -- do not hand-edit.
GrB_Info GB (_Cdense_accumb__bxnor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block above already returned;
// harmless artifact of the code generator, kept to match the template.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled kernel: no BXNOR column-scale (A*D) specialization is generated,
// hence the "(none)" placeholder under #if 0. Auto-generated file.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled kernel: no BXNOR row-scale (D*B) specialization is generated,
// hence the "(none)" placeholder under #if 0. Auto-generated file.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked); also implements eWiseUnion, in which
// case alpha/beta supply the "missing entry" values for A and B.
// Auto-generated file -- do not hand-edit.
GrB_Info GB (_AaddB__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only meaningful for eWiseUnion; otherwise they stay
// uninitialized and the template is expected not to read them
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (method 08) producing a sparse/hypersparse C, with
// optional mask M. Auto-generated file -- do not hand-edit.
GrB_Info GB (_AemultB_08__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#>=A.*B (method 02): A sparse/hyper, B bitmap/full. For
// BXNOR, GB_BINOP_FLIP is 0 (the op is commutative), so only the
// non-flipped branch below is compiled. Auto-generated file.
GrB_Info GB (_AemultB_02__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M>=A.*B (method 04): M sparse/hyper, A and B bitmap/full.
// Auto-generated file -- do not hand-edit.
GrB_Info GB (_AemultB_04__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap (optionally masked).
// Auto-generated file -- do not hand-edit.
GrB_Info GB (_AemultB_bitmap__bxnor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bxnor (x, Bx [p]) for all entries: binds the scalar x as the
// first operand. Auto-generated file -- do not hand-edit.
GrB_Info GB (_bind1st__bxnor_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present in the bitmap Bb (GBB is 1 for full)
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bxnor (Ax [p], y) for all entries: binds the scalar y as the
// second operand. Auto-generated file -- do not hand-edit.
GrB_Info GB (_bind2nd__bxnor_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap Ab (GBB is 1 for full)
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
// C = bxnor (x, A'): transpose A and apply the operator with x bound first,
// using the GB_CAST_OP macro defined just above. Auto-generated file.
GrB_Info GB (_bind1st_tran__bxnor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
// C = bxnor (A', y): transpose A and apply the operator with y bound
// second, via the GB_CAST_OP macro defined just above. Auto-generated file.
GrB_Info GB (_bind2nd_tran__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_jacobi_avx512.c | #include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <immintrin.h>
#include <immintrin.h>
#define REAL float
/* Current wall-clock time in milliseconds (ftime-based, ~1 ms resolution). */
static double read_timer_ms()
{
    struct timeb now;
    ftime(&now);
    return 1000.0 * (double)now.time + (double)now.millitm;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define DEFAULT_DIMSIZE 256
/* Prints the n x m row-major matrix A to stdout, one row per line, each
 * entry labelled "name[i][j]:value". */
void print_array(char *title,char *name,float *A,int n,int m)
{
    int row, col;
    printf("%s:\n", title);
    for (row = 0; row < n; row++) {
        for (col = 0; col < m; col++)
            printf("%s[%d][%d]:%f ", name, row, col, A[row * m + col]);
        printf("\n");
    }
    printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Sets the grid spacings *dx, *dy, zeroes the solution u, and fills the
 * right-hand side f for the Helmholtz problem on [-1,1]^2, assuming the
 * exact solution u(x,y) = (1-x^2)*(1-y^2). u_p and f_p are n x m row-major
 * arrays reinterpreted as 2-D below. */
void initialize(int n,int m,float alpha,float *dx,float *dy,float *u_p,float *f_p)
{
int i;
int j;
int xx;
int yy;
float (*u)[m] = ((float (*)[m])u_p);
float (*f)[m] = ((float (*)[m])f_p);
//double PI=3.1415926;
*dx = (2.0 / (n - 1));
*dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
/* NOTE(review): xx and yy are truncated to int, so the grid coordinate
 * is rounded toward zero, and the (i-1)/(j-1) offsets shift the grid
 * by one cell. This matches jacobi_seq's translated original and is
 * preserved for fidelity -- TODO confirm against the Fortran source. */
xx = ((int )(- 1.0 + ( *dx * (i - 1))));
yy = ((int )(- 1.0 + ( *dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
}
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* Computes and prints the RMS-style error between the numerical solution
 * u and the exact solution (1-x^2)*(1-y^2) over the shifted grid used by
 * initialize(). Requires <math.h> for sqrt. */
void error_check(int n,int m,float alpha,float dx,float dy,float *u_p,float *f_p)
{
int i;
int j;
float xx;
float yy;
float temp;
float error;
error = 0.0;
float (*u)[m] = ((float (*)[m])u_p);
float (*f)[m] = ((float (*)[m])f_p);
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
/* here xx/yy stay float (no int truncation), unlike initialize() */
xx = (- 1.0 + (dx * (i - 1)));
yy = (- 1.0 + (dy * (j - 1)));
temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
error = error + temp * temp;
}
/* sqrt of the summed squares, normalized by the number of grid points */
error = (sqrt(error) / (n * m));
printf("Solution Error: %2.6g\n",error);
}
void jacobi_seq(int n,int m,float dx,float dy,float alpha,float relax,float *u_p,float *f_p,float tol,int mits);
void jacobi_omp(int n,int m,float dx,float dy,float alpha,float relax,float *u_p,float *f_p,float tol,int mits);
/* Driver: parses optional [n m alpha tol relax mits] arguments, runs the
 * sequential and OpenMP Jacobi solvers on identical inputs, and reports
 * elapsed time, MFLOPS, and the solution error of the sequential run.
 * Requires <string.h> for memcpy. */
int main(int argc,char *argv[])
{
int status = 0;
int n = 256;
int m = 256;
float alpha = 0.0543;
float tol = 0.0000000001;
float relax = 1.0;
int mits = 5000;
/*fprintf(stderr, "Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]\n");
fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
fprintf(stderr, "\talpha - Helmholtz constant (always greater than 0.0), default: %g\n", alpha);
fprintf(stderr, "\ttol - error tolerance for iterative solver, default: %g\n", tol);
fprintf(stderr, "\trelax - Successice over relaxation parameter, default: %g\n", relax);
fprintf(stderr, "\tmits - Maximum iterations for iterative solver, default: %d\n", mits);*/
/* each branch consumes exactly argc-1 positional arguments, in order;
 * with a single argument the grid is square (m = n) */
if (argc == 2) {
sscanf(argv[1],"%d",&n);
m = n;
}
else if (argc == 3) {
sscanf(argv[1],"%d",&n);
sscanf(argv[2],"%d",&m);
}
else if (argc == 4) {
sscanf(argv[1],"%d",&n);
sscanf(argv[2],"%d",&m);
sscanf(argv[3],"%g",&alpha);
}
else if (argc == 5) {
sscanf(argv[1],"%d",&n);
sscanf(argv[2],"%d",&m);
sscanf(argv[3],"%g",&alpha);
sscanf(argv[4],"%g",&tol);
}
else if (argc == 6) {
sscanf(argv[1],"%d",&n);
sscanf(argv[2],"%d",&m);
sscanf(argv[3],"%g",&alpha);
sscanf(argv[4],"%g",&tol);
sscanf(argv[5],"%g",&relax);
}
else if (argc == 7) {
sscanf(argv[1],"%d",&n);
sscanf(argv[2],"%d",&m);
sscanf(argv[3],"%g",&alpha);
sscanf(argv[4],"%g",&tol);
sscanf(argv[5],"%g",&relax);
sscanf(argv[6],"%d",&mits);
}
else {
/* the rest of arg ignored */
}
printf("jacobi %d %d %g %g %g %d\n",n,m,alpha,tol,relax,mits);
printf("------------------------------------------------------------------------------------------------------\n");
/** init the array */
float *u = (float *)(malloc(sizeof(float ) * n * m));
float *uomp = (float *)(malloc(sizeof(float ) * n * m));
float *f = (float *)(malloc(sizeof(float ) * n * m));
float dx;
/* grid spacing in x direction */
float dy;
/* grid spacing in y direction */
initialize(n,m,alpha,&dx,&dy,u,f);
/* uomp starts from the same initial state so both solvers see identical
 * inputs */
memcpy(uomp,u,sizeof(float ) * n * m);
double elapsed = read_timer_ms();
jacobi_seq(n,m,dx,dy,alpha,relax,u,f,tol,mits);
elapsed = read_timer_ms() - elapsed;
printf("seq elasped time(ms): %4f\n",elapsed);
/* 13 floating-point ops per interior point per iteration */
double mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
printf("MFLOPS: %12.6g\n",mflops);
puts("================");
elapsed = read_timer_ms();
jacobi_omp(n,m,dx,dy,alpha,relax,uomp,f,tol,mits);
elapsed = read_timer_ms() - elapsed;
printf("OpenMP elasped time(ms): %4f\n",elapsed);
mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
printf("MFLOPS: %12.6g\n",mflops);
//print_array("Sequential Run", "u",(REAL*)u, n, m);
/* error is checked only for the sequential result */
error_check(n,m,alpha,dx,dy,u,f);
free(u);
free(f);
free(uomp);
return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Sequential Jacobi solver for the Helmholtz equation (see file header).
 * Updates u_p in place and prints the iteration count and final residual.
 * Interface unchanged; uold is now heap-allocated -- the previous
 * `float uold[n][m]` VLA placed n*m*sizeof(float) bytes on the stack
 * (256 KB at the default 256x256, more for larger grids), risking stack
 * overflow, and jacobi_omp already uses malloc for the same buffer. */
void jacobi_seq(int n,int m,float dx,float dy,float alpha,float omega,float *u_p,float *f_p,float tol,int mits)
{
  int i;
  int j;
  int k;
  float error;
  float ax;
  float ay;
  float b;
  float resid;
  float *uold_p = (float *)(malloc(sizeof(float ) * n * m));
  float (*uold)[m] = ((float (*)[m])uold_p);
  float (*u)[m] = ((float (*)[m])u_p);
  float (*f)[m] = ((float (*)[m])f_p);
  /*
   * Initialize coefficients */
  /* X-direction coef */
  ax = (1.0 / (dx * dx));
  /* Y-direction coef */
  ay = (1.0 / (dy * dy));
  /* Central coeff */
  b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
  error = (10.0 * tol);
  k = 1;
  while(k <= mits && error > tol){
    error = 0.0;
    /* Copy new solution into old */
    for (i = 0; i < n; i++)
      for (j = 0; j < m; j++)
        uold[i][j] = u[i][j];
    /* 5-point stencil update on the interior; boundary values are fixed */
    for (i = 1; i < n - 1; i++)
      for (j = 1; j < m - 1; j++) {
        resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;
        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid * resid;
      }
    /* Error check */
    error = (sqrt(error) / (n * m));
    k = k + 1;
    /* End iteration loop */
  }
  free(uold_p);
  printf("Total Number of Iterations: %d\n",k);
  printf("Residual: %.15g\n",error);
}
/* "Parallel" variant of jacobi_seq().  Despite the name there is no OpenMP
 * pragma in this body; it is machine-vectorized AVX-512 code (note the
 * generated __vecNN/__ptrNN temporaries).
 *
 * NOTE(review): this generated code does NOT appear to match jacobi_seq():
 *  - `resid` is broadcast into vectors below before it is ever assigned in
 *    this function (uninitialized read) -- TODO confirm against generator.
 *  - The stored update is resid*omega - uold, whereas jacobi_seq computes
 *    uold - omega*resid.
 *  - Both copy and sweep loops step by 16 lanes with no remainder handling,
 *    so rows whose length is not a multiple of 16 are read/written past
 *    their end -- TODO confirm callers guarantee m % 16 == 0.
 */
void jacobi_omp(int n,int m,float dx,float dy,float alpha,float omega,float *u_p,float *f_p,float tol,int mits)
{
int i;
int j;
int k;
float error;
float ax;
float ay;
float b;
float resid;
/* uold lives on the heap here (cf. the stack VLA in jacobi_seq). */
float *tmp = (float *)(malloc(sizeof(float ) * n * m));
float (*uold)[m] = ((float (*)[m])tmp);
float (*u)[m] = ((float (*)[m])u_p);
float (*f)[m] = ((float (*)[m])f_p);
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 1;
while(k <= mits && error > tol){
error = 0.0;
//printf("===================== iteration %d ===========================\n", k);
/* Copy new solution into old */
/* 16-float-wide copy; no tail loop, so m must be a multiple of 16. */
for (i = 0; i < n; i++) {
for (j = 0; j <= m - 1; j += 16) {
float *__ptr39 = uold[i];
float *__ptr40 = u[i];
__m512 __vec41 = _mm512_loadu_ps(&__ptr40[j]);
_mm512_storeu_ps(&__ptr39[j],__vec41);
}
}
for (i = 1; i < n - 1; i++) {
/* Loop-invariant broadcasts hoisted out of the j loop. */
__m512 __vec0 = _mm512_set1_ps(ax);
__m512 __vec7 = _mm512_set1_ps(ay);
__m512 __vec15 = _mm512_set1_ps(b);
__m512 __vec23 = _mm512_set1_ps(b);
__m512 __part25 = _mm512_setzero_ps();
__m512 __vec29 = _mm512_set1_ps(omega);
/* NOTE(review): `resid` (and `error`) are broadcast here as scalars;
 * `resid` has not been assigned yet at this point in the function. */
__m512 __vec30 = _mm512_set1_ps(resid);
__m512 __vec33 = _mm512_set1_ps(error);
__m512 __vec34 = _mm512_set1_ps(resid);
__m512 __vec35 = _mm512_set1_ps(resid);
__m512 __part38 = _mm512_setzero_ps();
for (j = 1; j <= m - 1 - 1; j += 16) {
/* Five-point stencil: ax*(up+down) + ay*(left+right) + b*centre */
float *__ptr1 = uold[i - 1];
__m512 __vec2 = _mm512_loadu_ps(&__ptr1[j]);
float *__ptr3 = uold[i + 1];
__m512 __vec4 = _mm512_loadu_ps(&__ptr3[j]);
__m512 __vec5 = _mm512_add_ps(__vec4,__vec2);
__m512 __vec6 = _mm512_mul_ps(__vec5,__vec0);
float *__ptr8 = uold[i];
__m512 __vec9 = _mm512_loadu_ps(&__ptr8[j - 1]);
float *__ptr10 = uold[i];
__m512 __vec11 = _mm512_loadu_ps(&__ptr10[j + 1]);
__m512 __vec12 = _mm512_add_ps(__vec11,__vec9);
__m512 __vec13 = _mm512_mul_ps(__vec12,__vec7);
__m512 __vec14 = _mm512_add_ps(__vec13,__vec6);
float *__ptr16 = uold[i];
__m512 __vec17 = _mm512_loadu_ps(&__ptr16[j]);
__m512 __vec18 = _mm512_mul_ps(__vec17,__vec15);
__m512 __vec19 = _mm512_add_ps(__vec18,__vec14);
float *__ptr20 = f[i];
__m512 __vec21 = _mm512_loadu_ps(&__ptr20[j]);
/* NOTE(review): this computes b / (f - stencil); jacobi_seq computes
 * (stencil - f) / b -- looks inverted.  TODO verify generator output. */
__m512 __vec22 = _mm512_sub_ps(__vec21,__vec19);
__m512 __vec24 = _mm512_div_ps(__vec23,__vec22);
__part25 = _mm512_add_ps(__part25,__vec24);
float *__ptr26 = u[i];
float *__ptr27 = uold[i];
__m512 __vec28 = _mm512_loadu_ps(&__ptr27[j]);
/* NOTE(review): stores resid*omega - uold using the stale scalar
 * broadcast __vec30, not the per-lane residual __vec24. */
__m512 __vec31 = _mm512_mul_ps(__vec30,__vec29);
__m512 __vec32 = _mm512_sub_ps(__vec31,__vec28);
_mm512_storeu_ps(&__ptr26[j],__vec32);
__m512 __vec36 = _mm512_mul_ps(__vec35,__vec34);
__m512 __vec37 = _mm512_add_ps(__vec36,__vec33);
__part38 = _mm512_add_ps(__part38,__vec37);
}
/* Horizontal reduction of __part38 into scalar error: after the two
 * hadds, lanes 0-3 hold the low-128 sum and lanes 4-7 the high-128 sum,
 * so [0]+[6] is a valid full sum. */
__m256 __buf3 = _mm512_extractf32x8_ps(__part38,0);
__m256 __buf4 = _mm512_extractf32x8_ps(__part38,1);
__buf4 = _mm256_add_ps(__buf3,__buf4);
__buf4 = _mm256_hadd_ps(__buf4,__buf4);
__buf4 = _mm256_hadd_ps(__buf4,__buf4);
float __buf5[8];
/* NOTE(review): `&__buf5` has type float(*)[8]; should likely be __buf5. */
_mm256_storeu_ps(&__buf5,__buf4);
error = __buf5[0] + __buf5[6];
/* Same reduction pattern for the residual accumulator __part25. */
__m256 __buf0 = _mm512_extractf32x8_ps(__part25,0);
__m256 __buf1 = _mm512_extractf32x8_ps(__part25,1);
__buf1 = _mm256_add_ps(__buf0,__buf1);
__buf1 = _mm256_hadd_ps(__buf1,__buf1);
__buf1 = _mm256_hadd_ps(__buf1,__buf1);
float __buf2[8];
_mm256_storeu_ps(&__buf2,__buf1);
resid = __buf2[0] + __buf2[6];
}
/* Error check */
//if (k % 500 == 0)
// printf("Finished %d iteration with error: %g\n", k, error);
error = (sqrt(error) / (n * m));
k = k + 1;
/* End iteration loop */
}
printf("Total Number of Iterations: %d\n",k);
printf("Residual: %.15g\n",error);
free(tmp);
}
|
openssl_enc_fmt_plug.c | /* OpenSSL "enc" cracker for JtR.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall.com>
*
* $ openssl enc -aes-256-cbc -p -e -a -salt -in hello.txt -out hello.txt.enc
* enter aes-256-cbc encryption password:
* Verifying - enter aes-256-cbc encryption password:
* salt=305CEDC2A0521011
* key=E08A1E6E1493BD3D3DAA25E112259D1688F7A0302AC8C16208DBDCEF179765F0
* iv =582FDDF9603B9B03A54FC0BB34370DDE
*
* $ cat hello.txt
* 123456789012
*
* Input Format:
*
* $openssl$cipher$md$salt-size$salt$last-chunks$inlined$known-plaintext$plaintext
* $openssl$cipher$md$salt-size$salt$last-chunks$0$datalen$data$known-plaintext$plaintext
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_openssl;
#elif FMT_REGISTERS_H
john_register_one(&fmt_openssl);
#else
#if AC_BUILT
#include "autoconfig.h"
#endif
#ifdef __CYGWIN__
// cygwin has HORRIBLE performance GOMP for this format it runs at 1/#cpu's the speed of OMP_NUM_THREADS=1 or non-GMP build
#undef _OPENMP
#undef FMT_OMP
#define FMT_OMP 0
#endif
#include <string.h>
#include <errno.h>
#if !AC_BUILT || HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <stdlib.h>
#include "stdint.h"
#include <sys/types.h>
#include <openssl/evp.h>
#include <openssl/aes.h>
#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "jumbo.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 8
#endif
#include "memdbg.h"
#define FORMAT_LABEL "openssl-enc"
#define FORMAT_NAME "OpenSSL \"enc\" encryption"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 8
#define MAX_KEYS_PER_CRYPT 8
#define PLAINTEXT_LENGTH 125
#define FORMAT_TAG "$openssl$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
/* Per-candidate copy of each plaintext password, NUL-terminated. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] is set by crypt_all(): 1 if saved_key[i] decrypted cur_salt. */
static int *cracked;
/* One decoded ciphertext line (see the input-format comment at the top
 * of this file); filled in by get_salt(), consumed by decrypt()/kpa(). */
static struct custom_salt {
	unsigned int saltlen;          /* bytes of salt[] actually used (1..16) */
	unsigned char salt[16];        /* raw salt fed to EVP_BytesToKey() */
	int cipher;                    /* 0 = AES-256-CBC, 1 = AES-128-CBC (see decrypt()) */
	int md;                        /* 0 = MD5, 1 = SHA-1 key-derivation digest */
	int inlined;                   /* 1: whole ciphertext is in last_chunks */
	int kpa;                       /* 1: known plaintext available in kpt[] */
	int datalen;                   /* bytes of data[] used when not inlined */
	unsigned char kpt[256];        /* NUL-terminated known plaintext */
	unsigned char data[16 * 16];   /* ciphertext tail used for the KPA check */
	unsigned char last_chunks[32]; /* inlined: final block; else bytes 0-15 are
	                                  the IV for the final block in 16-31 */
} *cur_salt;
/* Self-test vectors: inlined and non-inlined lines, both digests, with and
 * without a known-plaintext field (format documented at the top of file). */
static struct fmt_tests openssl_tests[] = {
	{"$openssl$1$0$8$a1a5e529c8d92da5$8de763bf61377d365243993137ad9729$1$0", "password"},
	{"$openssl$1$1$8$844527fb2f5d7ad5$ebccb1fcd2b1b30c5c3624d4016978ea$1$0", "password"},
	{"$openssl$0$0$8$305cedc2a0521011$bf11609a01e78ec3f50f0cc483e636f9$1$0", "password"},
	{"$openssl$0$0$8$305cedc2a0521011$bf11609a01e78ec3f50f0cc483e636f9$1$1$123456", "password"},
	{"$openssl$0$0$8$3993671be477e8f0$95384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0$256$9bbbc2af64ba27444370e3b3db6f4077a5b83c099a9b0a13d0c03dbc89185aad078266470bb15c44e7b35aef66f456ba7f44fb0f60824331f5b598347cd471c6745374c7dbecf49a1dd0378e938bb9d3d68703e3038805fb3c7bf0623222bcc8e9375b10853aa7c991ddd086b8e2a97dd9ddd351ee0facde9bc3529742f0ffab990db046f5a64765d7a4b1c83b0290acae3eaa09278933cddcf1fed0ab14d408cd43fb73d830237dcd681425cd878bf4b542c108694b90e82f912c4aa4de02bd002dce975c2bb308aad933bfcfd8375d91837048d110f007ba3852dbb498a54595384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0", "password"},
	{"$openssl$0$0$8$3993671be477e8f0$95384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0$256$9bbbc2af64ba27444370e3b3db6f4077a5b83c099a9b0a13d0c03dbc89185aad078266470bb15c44e7b35aef66f456ba7f44fb0f60824331f5b598347cd471c6745374c7dbecf49a1dd0378e938bb9d3d68703e3038805fb3c7bf0623222bcc8e9375b10853aa7c991ddd086b8e2a97dd9ddd351ee0facde9bc3529742f0ffab990db046f5a64765d7a4b1c83b0290acae3eaa09278933cddcf1fed0ab14d408cd43fb73d830237dcd681425cd878bf4b542c108694b90e82f912c4aa4de02bd002dce975c2bb308aad933bfcfd8375d91837048d110f007ba3852dbb498a54595384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$1$00000000", "password"},
	{NULL}
};
/* One-time format setup: scale the key counts for OpenMP and allocate the
 * per-candidate password and result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
	                            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	cracked = mem_calloc_tiny(sizeof(*cracked) *
	                          self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
//#define DEBUG_VALID
#ifdef DEBUG_VALID
// Awesome debug macro for valid()
#define return if(printf("\noriginal: %s\n",ciphertext)+printf("fail line %u: '%s' p=%p q=%p q-p-1=%u\n",__LINE__,p,p,q,(unsigned int)(q-p-1)))return
#endif
#define HEX_DIGITS "0123456789abcdefABCDEF"
#define DEC_DIGITS "0123456789"
/* Syntax check of one ciphertext line against:
 *   $openssl$cipher$md$salt-size$salt$last-chunks$inlined[$datalen$data]$kpa[$plaintext]
 * Returns 1 if the line parses, 0 otherwise.  Only field shape/lengths are
 * checked here; get_salt() relies on this and parses without re-validation.
 * Note: under DEBUG_VALID the `return` keyword is macro-wrapped above to
 * print the failing line -- keep each `return 0` on its own statement. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext, *q = NULL;
	int len;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	p += TAG_LENGTH; // cipher
	q = strchr(p, '$');
	if (!q)
		return 0;
	q = q + 1;
	/* cipher id: exactly one char, '0' (AES-256) or '1' (AES-128) */
	if ((q - p - 1) != 1)
		return 0;
	if (*p != '0' && *p != '1')
		return 0;
	p = q; q = strchr(p, '$'); // md
	if (!q)
		return 0;
	q = q + 1;
	/* digest id: exactly one char, '0' (MD5) or '1' (SHA-1) */
	if ((q - p - 1) != 1)
		return 0;
	if (*p != '0' && *p != '1')
		return 0;
	p = q; q = strchr(p, '$'); // salt-size
	if (!q)
		return 0;
	q = q + 1;
	/* 1-2 decimal digits, value within salt[] capacity */
	len = strspn(p, DEC_DIGITS);
	if (len < 1 || len > 2 || len != q - p - 1)
		return 0;
	len = atoi(p);
	if (len < 1 || len > sizeof(cur_salt->salt))
		return 0;
	p = q; q = strchr(p, '$'); // salt
	if (!q)
		return 0;
	q = q + 1;
	/* must be exactly 2 hex chars per salt byte */
	if (2 * len != q - p - 1 || 2 * len != strspn(p, HEX_DIGITS))
		return 0;
	p = q; q = strchr(p, '$'); // last-chunks
	if (!q)
		return 0;
	q = q + 1;
	/* even-length hex string, at least one byte, fits in data[] */
	len = strspn(p, HEX_DIGITS);
	if (len != q - p - 1 || len < 2 || len & 1 || len > sizeof(cur_salt->data))
		return 0;
	p = q; q = strchr(p, '$'); // inlined
	if (!q)
		return 0;
	q = q + 1;
	if ((q - p - 1) != 1)
		return 0;
	if (*p != '0' && *p != '1')
		return 0;
	/* non-inlined lines carry two extra fields: datalen and data */
	if (*p == '0') {
		p = q; q = strchr(p, '$'); // datalen
		if (!q)
			return 0;
		q = q + 1;
		len = strspn(p, DEC_DIGITS);
		if (len < 1 || len > 3 || len != q - p - 1)
			return 0;
		len = atoi(p);
		if (len < 1 || len > sizeof(cur_salt->data))
			return 0;
		p = q; q = strchr(p, '$'); // data
		if (!q)
			return 0;
		q = q + 1;
		if (2 * len != q - p - 1 || 2 * len != strspn(p, HEX_DIGITS))
			return 0;
	}
	p = q; q = strchr(p, '$'); // known-plaintext
	/* no further '$': line is valid only if the kpa flag is a literal "0" */
	if (!q)
		return !strcmp(p, "0");
	/* "$" as the very last character: plaintext field announced but missing */
	if(strlen(q) == 1)
		return 0;
	q = q + 1;
	if ((q - p - 1) != 1)
		return 0;
	if (*p != '0' && *p != '1')
		return 0;
	/* plaintext must fit in kpt[] with its terminating NUL */
	if (strlen(q) > sizeof(cur_salt->kpt) - 1)
		return 0;
#ifdef DEBUG_VALID
#undef return
#endif
	return 1;
}
/* Decode 2*count hex characters from "hex" into "out" (count bytes). */
static void decode_hex(unsigned char *out, const char *hex, int count)
{
	int i;

	for (i = 0; i < count; i++)
		out[i] = atoi16[ARCH_INDEX(hex[i * 2])] * 16
			+ atoi16[ARCH_INDEX(hex[i * 2 + 1])];
}

/* Parse a ciphertext line (already vetted by valid()) into a static
 * custom_salt record and return a pointer to it. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *copy = strdup(ciphertext);
	char *base = copy;
	char *field;

	memset(&cs, 0, sizeof(cs));
	copy += TAG_LENGTH;
	cs.cipher = atoi(strtok(copy, "$"));
	cs.md = atoi(strtok(NULL, "$"));
	cs.saltlen = atoi(strtok(NULL, "$"));
	decode_hex(cs.salt, strtok(NULL, "$"), cs.saltlen);
	field = strtok(NULL, "$");
	decode_hex(cs.last_chunks, field, strlen(field) / 2);
	cs.inlined = atoi(strtok(NULL, "$"));
	if (!cs.inlined) {
		/* non-inlined lines add an explicit datalen + data pair */
		cs.datalen = atoi(strtok(NULL, "$"));
		decode_hex(cs.data, strtok(NULL, "$"), cs.datalen);
	}
	cs.kpa = atoi(strtok(NULL, "$"));
	if (cs.kpa)
		/* kpt[] was zeroed above, so the copy stays NUL-terminated */
		strncpy((char*)cs.kpt, strtok(NULL, "$"), 255);
	MEM_FREE(base);
	return (void *)&cs;
}
/* Known-plaintext check: decrypt the stored ciphertext with the derived
 * key/IV and search for cur_salt->kpt in the result.
 * Returns 0 on a hit (matching decrypt()'s success value), -1 otherwise.
 * NOTE(review): a 256-bit schedule is set up even for the AES-128 cipher
 * id -- looks intentional/legacy, confirm before changing. */
static int kpa(unsigned char *key, unsigned char *iv, int inlined)
{
	AES_KEY akey;
	unsigned char plain[16 * 16];
	const unsigned char *src = inlined ? cur_salt->last_chunks : cur_salt->data;
	int length = inlined ? 16 : cur_salt->datalen;

	if (AES_set_decrypt_key(key, 256, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	}
	AES_cbc_encrypt(src, plain, length, &akey, iv, AES_DECRYPT);
	if (memmem(plain, length, cur_salt->kpt, strlen((char*)cur_salt->kpt)))
		return 0;
	return -1;
}
static int decrypt(char *password)
{
unsigned char out[16];
int pad, n, i;
AES_KEY akey;
unsigned char iv[16];
unsigned char biv[16];
unsigned char key[32];
int nrounds = 1;
// FIXME handle more stuff
switch(cur_salt->cipher) {
case 0:
switch(cur_salt->md) {
case 0:
EVP_BytesToKey(EVP_aes_256_cbc(), EVP_md5(),
cur_salt->salt, (unsigned char*)password,
strlen(password), nrounds, key, iv);
AES_set_decrypt_key(key, 256, &akey);
break;
case 1:
EVP_BytesToKey(EVP_aes_256_cbc(), EVP_sha1(),
cur_salt->salt, (unsigned char*)password,
strlen(password), nrounds, key, iv);
AES_set_decrypt_key(key, 256, &akey);
break;
}
break;
case 1:
switch(cur_salt->md) {
case 0:
EVP_BytesToKey(EVP_aes_128_cbc(), EVP_md5(),
cur_salt->salt, (unsigned char*)password,
strlen(password), nrounds, key, iv);
AES_set_decrypt_key(key, 128, &akey);
break;
case 1:
EVP_BytesToKey(EVP_aes_128_cbc(), EVP_sha1(),
cur_salt->salt, (unsigned char*)password,
strlen(password), nrounds, key, iv);
AES_set_decrypt_key(key, 128, &akey);
break;
}
break;
}
memcpy(biv, iv, 16);
if (cur_salt->inlined)
AES_cbc_encrypt(cur_salt->last_chunks, out, 16, &akey, iv, AES_DECRYPT);
else {
memcpy(iv, cur_salt->last_chunks, 16);
AES_cbc_encrypt(cur_salt->last_chunks + 16, out, 16, &akey, iv, AES_DECRYPT);
}
// FIXME use padding check for CBC mode only
// now check padding
pad = out[16 - 1];
if(pad < 1 || pad > 16)
return -1;
n = 16 - pad;
for(i = n; i < 16; i++)
if(out[i] != pad)
return -1;
if(cur_salt->kpa)
return kpa(key, biv, cur_salt->inlined);
return 0;
}
/* Make "salt" (a custom_salt from get_salt()) current for crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Store candidate password "key" at slot "index", truncated to
 * PLAINTEXT_LENGTH and always NUL-terminated. */
static void openssl_set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored plaintext candidate for slot "index". */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Try every queued candidate against cur_salt; record hits in cracked[].
 * Each index is independent, so the loop parallelizes trivially. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		cracked[index] = (decrypt(saved_key[index]) == 0);

	return count;
}
/* Any hit among the first "count" candidates? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Did the candidate at "index" crack the current salt? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No exact binary to compare against (BINARY_SIZE is 0, format is
 * FMT_NOT_EXACT); always accept. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format registration record: parameters first, then the method table. */
struct fmt_main fmt_openssl = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_NOT_EXACT: padding check alone admits false positives */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		/*
		 * FIXME: if there wouldn't be so many false positives,
		 * it would be useful to report some tunable costs
		 */
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		openssl_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		openssl_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unaryop__identity_int64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_fp64
// op(A') function: GB_tran__identity_int64_fp64
// C type: int64_t
// A type: double
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z ; GB_CAST_SIGNED(z,aij,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int64_t) Ax [p] for all anz entries, via GB_CAST_SIGNED
// (macro defined in GB.h -- presumably handles NaN/out-of-range; confirm
// there).  This file is auto-generated; keep edits to comments only.
GrB_Info GB_unop__identity_int64_fp64
(
    int64_t *Cx,        // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,        // number of entries to convert
    int nthreads        // OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so a static schedule divides them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int64_t) A': the transpose/typecast loop itself lives in the shared
// template GB_unaryop_transpose.c, instantiated with the GB_* macros above.
GrB_Info GB_tran__identity_int64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // workspace from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                          // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
generator_gemm_common.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
LIBXSMM_API_INTERN
/* Fill io_micro_kernel_config with the full-vector-width instruction
 * selection for the given architecture, datatype and alignment flags.
 * Fix vs. original: in the AVX-512 (<= LIBXSMM_X86_ALLFEAT) branches for
 * F32/I16/BF16 with aligned C and masking enabled, the code re-assigned
 * c_vmove_instruction instead of c_vmove_nts_instruction, leaving the NTS
 * slot at 0 from the memset; the F64 branch shows the intended pattern. */
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const unsigned int i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* unsupported architecture: everything undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    /* SSE: 16 xmm registers */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    /* AVX/AVX2: 16 ymm registers; FMA only on AVX2 */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      }
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX-512: 32 zmm registers; NTS stores only without masking */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* bugfix: was c_vmove_instruction, leaving the NTS slot zero */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* bugfix: was c_vmove_instruction, leaving the NTS slot zero */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* bugfix: was c_vmove_instruction, leaving the NTS slot zero */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* that should not happen */
  }
  /* scalar/ALU instructions are architecture-independent */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/**
 * Populates the micro-kernel configuration for half-vector-width (xmm) GEMM
 * code generation.
 *
 * Out-of-range architectures receive an all-UNDEF configuration; SSE targets
 * are redirected to the scalar configuration and AVX512 targets to the
 * full-vector configuration, so only AVX/AVX2 get a genuine half-vector setup.
 *
 * @param io_micro_kernel_config configuration structure that is populated.
 * @param i_arch                 requested instruction-set architecture id.
 * @param i_xgemm_desc           GEMM descriptor; datatype and A/C alignment flags are read.
 * @param i_use_masking_a_c      non-zero if masked loads/stores of A and C are requested.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
    const unsigned int i_arch,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_use_masking_a_c ) {
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* unknown architecture: flag every field as invalid/undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    /* consistency fix: also reset the NTS store instruction, as the scalar
       variant does; previously this field was left unset on this path */
    io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    /* no half-vector SSE path implemented: fall back to scalar generation */
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* FP64: 2 elements per xmm register */
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      /* aligned vs. unaligned A loads depending on the descriptor flags */
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      /* plain AVX uses separate mul/add; beyond AVX an FMA is emitted instead */
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      /* non-FP64 (FP32 path): 4 elements per xmm register */
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX512-class targets always use the full vector width */
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else {
    /* should not happen: the ranges above are exhaustive */
  }

  /* general-purpose/ALU instructions are identical for all architectures */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/**
 * Populates the micro-kernel configuration for scalar (one element at a time)
 * GEMM code generation.
 *
 * @param io_micro_kernel_config configuration structure that is populated.
 * @param i_arch                 requested instruction-set architecture id.
 * @param i_xgemm_desc           GEMM descriptor; only the datatype is read.
 * @param i_use_masking_a_c      non-zero if masked loads/stores of A and C are requested.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
    const unsigned int i_arch,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_use_masking_a_c ) {
  /* shorthand for the structure being filled in */
  libxsmm_micro_kernel_config* const l_cfg = io_micro_kernel_config;
  /* 1 if the input datatype is FP64, 0 otherwise (FP32 path) */
  const int l_is_f64 = ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ? 1 : 0;

  if ( ( i_arch < LIBXSMM_X86_SSE3 ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
    /* out-of-range architecture: invalidate the whole configuration */
    l_cfg->instruction_set = LIBXSMM_X86_GENERIC;
    l_cfg->vector_reg_count = 0;
    l_cfg->use_masking_a_c = 0;
    l_cfg->vector_name = 'a';
    l_cfg->vector_length = 0;
    l_cfg->datatype_size = 0;
    l_cfg->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    /* SSE3/SSE4: legacy-encoded scalar moves and separate mul/add */
    const unsigned int l_mov = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_MOVSD : LIBXSMM_X86_INSTR_MOVSS;
    l_cfg->instruction_set = LIBXSMM_X86_SSE3;
    l_cfg->vector_reg_count = 16;
    l_cfg->use_masking_a_c = i_use_masking_a_c;
    l_cfg->vector_name = 'x';
    l_cfg->vector_length = 1;
    l_cfg->datatype_size = ( 0 != l_is_f64 ) ? 8 : 4;
    l_cfg->a_vmove_instruction = l_mov;
    l_cfg->b_vmove_instruction = l_mov;
    l_cfg->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->c_vmove_instruction = l_mov;
    l_cfg->c_vmove_nts_instruction = l_mov;
    l_cfg->vxor_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_XORPD : LIBXSMM_X86_INSTR_XORPS;
    l_cfg->vmul_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_MULSD : LIBXSMM_X86_INSTR_MULSS;
    l_cfg->vadd_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_ADDSD : LIBXSMM_X86_INSTR_ADDSS;
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX and newer: VEX-encoded scalar moves; architectures beyond plain AVX
       use a scalar FMA instead of a separate mul/add pair */
    const unsigned int l_vmov = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_VMOVSD : LIBXSMM_X86_INSTR_VMOVSS;
    l_cfg->instruction_set = i_arch;
    l_cfg->vector_reg_count = 16;
    l_cfg->use_masking_a_c = i_use_masking_a_c;
    l_cfg->vector_name = 'x';
    l_cfg->vector_length = 1;
    l_cfg->datatype_size = ( 0 != l_is_f64 ) ? 8 : 4;
    l_cfg->a_vmove_instruction = l_vmov;
    l_cfg->b_vmove_instruction = l_vmov;
    l_cfg->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    l_cfg->c_vmove_instruction = l_vmov;
    l_cfg->c_vmove_nts_instruction = l_vmov;
    l_cfg->vxor_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_VXORPD : LIBXSMM_X86_INSTR_VXORPS;
    if ( i_arch == LIBXSMM_X86_AVX ) {
      l_cfg->vmul_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_VMULSD : LIBXSMM_X86_INSTR_VMULSS;
      l_cfg->vadd_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_VADDSD : LIBXSMM_X86_INSTR_VADDSS;
    } else {
      l_cfg->vmul_instruction = ( 0 != l_is_f64 ) ? LIBXSMM_X86_INSTR_VFMADD231SD : LIBXSMM_X86_INSTR_VFMADD231SS;
      l_cfg->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* should not happen: the ranges above are exhaustive */
  }

  /* general-purpose/ALU instructions are identical for all architectures */
  l_cfg->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  l_cfg->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  l_cfg->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  l_cfg->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  l_cfg->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  l_cfg->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/**
 * Appends a debug-only flop counter (libxsmm_num_total_flops += 2*m*n*k) to the
 * generated C source; emitted only when generating C code (code_type == 0).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  if ( io_generated_code->code_type == 0 ) {
    char l_new_code[512];
    const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
    int l_code_length = 0;
    /* fixed preamble: guard with NDEBUG and make the update atomic under OpenMP */
    static const char* l_preamble[] = { "#ifndef NDEBUG\n", "#ifdef _OPENMP\n", "#pragma omp atomic\n", "#endif\n" };
    unsigned int l_i;
    for ( l_i = 0; l_i < sizeof(l_preamble)/sizeof(l_preamble[0]); ++l_i ) {
      l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "%s", l_preamble[l_i] );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* the actual counter update: 2 flops (mul+add) per m*n*k element */
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* close the NDEBUG guard */
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/**
 * Emits the head of the k loop: zeroes the k loop counter register, places the
 * jump-back label the matching footer branches to, and adds i_k_blocking to
 * the counter (the counter thus tracks k iterations already scheduled).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int i_m_blocking,
    const unsigned int i_k_blocking ) {
  /* i_m_blocking kept for signature symmetry with the other header/footer emitters */
  LIBXSMM_UNUSED(i_m_blocking);
  /* kloop counter = 0 */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
  /* loop head label */
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  /* kloop counter += i_k_blocking */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
/**
 * Emits the end of the k loop: compares the k loop counter against
 * i_max_blocked_k and jumps back while below. When i_kloop_complete is
 * non-zero, the B pointer is additionally rewound by a full k pass so it
 * points at the start of the next panel.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_m_blocking,
    const unsigned int i_max_blocked_k,
    const unsigned int i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  /* loop condition: jump back while the counter is below the blocked trip count */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    /* bytes advanced over B during the k loop: k elements, scaled by ldb when B is transposed */
    const int l_b_rewind = ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 )
      ? (int)(i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size)
      : (int)(i_xgemm_desc->k * i_micro_kernel_config->datatype_size);
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_b, l_b_rewind );
  }
}
/**
 * Emits the head of the batch-reduce loop: zeroes the reduce loop counter and
 * places the jump-back label the matching footer branches to.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* reduce loop counter = 0 */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
  /* loop head label */
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}
/**
 * Emits the end of the batch-reduce loop: increments the reduce loop counter,
 * compares it with the total reduce count register and jumps back while the
 * loop condition holds.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc) {
  /* descriptor kept for signature symmetry; not needed here */
  LIBXSMM_UNUSED(i_xgemm_desc);
  /* reduce loop counter += 1 */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
  /* compare against the total number of batch entries held in gp_reg_reduce_count */
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/**
 * Emits the head of the n loop: places the jump-back label, adds i_n_blocking
 * to the n loop counter (the counter tracks columns scheduled including the
 * current block) and zeroes the m loop counter for the new n block.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int i_n_blocking) {
  /* loop head label */
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  /* nloop counter += i_n_blocking */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
  /* mloop counter = 0 */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}
/**
 * Emits the end of the n loop: repositions the C pointer for the next n block,
 * advances B (or, in batch-reduce address mode, every entry of the A/B pointer
 * arrays) and finally compares the n loop counter against i_n_done, jumping
 * back while below.
 *
 * @param io_generated_code     generated-code container the instructions are appended to.
 * @param io_loop_label_tracker tracker holding the matching header label.
 * @param i_gp_reg_mapping      general-purpose register assignment.
 * @param i_micro_kernel_config active micro-kernel configuration.
 * @param i_xgemm_desc          GEMM descriptor (flags, ldb/ldc, m, datatype are read).
 * @param i_n_blocking          number of columns handled per n iteration.
 * @param i_n_done              n loop trip count compared against the counter.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_n_blocking,
    const unsigned int i_n_done ) {
  /* byte distance by which B moves to the next n block: i_n_blocking elements
     when B is transposed, i_n_blocking columns of leading dimension ldb
     otherwise (hoisted: previously computed identically in both branches below) */
  int l_b_offset = 0;
  if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
    l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
  } else {
    l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
  }
  /* advance C to the next n block net of the m extent already covered;
     BF16 C elements occupy datatype_size/2 bytes */
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
        (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
        (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
        (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
#endif
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    /* batch-reduce address mode: A and B are arrays of 8-byte pointers; walk
       the arrays in a reduce loop and update every entry in place
       (A entries: rewind by the m extent; B entries: advance by l_b_offset) */
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    /* load A pointer array entry */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_a,
        i_gp_reg_mapping->gp_reg_reduce_loop, 8,
        0,
        i_gp_reg_mapping->gp_reg_help_0,
        0 );
    /* rewind it by the m extent */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
        i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
    /* store it back */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_a,
        i_gp_reg_mapping->gp_reg_reduce_loop, 8,
        0,
        i_gp_reg_mapping->gp_reg_help_0,
        1 );
    /* load B pointer array entry */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_b,
        i_gp_reg_mapping->gp_reg_reduce_loop, 8,
        0,
        i_gp_reg_mapping->gp_reg_help_0,
        0 );
    /* advance it to the next n block */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
        i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
    /* store it back */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_b,
        i_gp_reg_mapping->gp_reg_reduce_loop, 8,
        0,
        i_gp_reg_mapping->gp_reg_help_0,
        1 );
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    /* plain pointers: advance B, rewind A by the m extent */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
        i_gp_reg_mapping->gp_reg_b, l_b_offset );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
        i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
  /* loop condition: jump back while the n counter is below i_n_done */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/**
 * Emits the head of the m loop: places the jump-back label and adds
 * i_m_blocking to the m loop counter (the counter tracks rows scheduled
 * including the current block).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int i_m_blocking ) {
  /* loop head label */
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  /* mloop counter += i_m_blocking */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
/* Walks the batch-reduce pointer array addressed by i_ptr_gp_reg (one 8-byte
 * pointer per batch entry, indexed by gp_reg_reduce_loop) and applies
 * i_alu_instruction with immediate i_offset to every entry in place.
 * gp_reg_help_0 and gp_reg_reduce_loop are preserved via push/pop.
 * (Extracted helper: this exact sequence previously appeared four times.) */
static void libxsmm_generator_gemm_footer_mloop_adjust_ptrs( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_ptr_gp_reg,
    const unsigned int i_alu_instruction,
    const unsigned int i_offset ) {
  libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
  libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
  /* load pointer array entry */
  libxsmm_x86_instruction_alu_mem( io_generated_code,
      i_micro_kernel_config->alu_mov_instruction,
      i_ptr_gp_reg,
      i_gp_reg_mapping->gp_reg_reduce_loop, 8,
      0,
      i_gp_reg_mapping->gp_reg_help_0,
      0 );
  /* adjust the entry */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_alu_instruction,
      i_gp_reg_mapping->gp_reg_help_0, i_offset );
  /* store the entry back */
  libxsmm_x86_instruction_alu_mem( io_generated_code,
      i_micro_kernel_config->alu_mov_instruction,
      i_ptr_gp_reg,
      i_gp_reg_mapping->gp_reg_reduce_loop, 8,
      0,
      i_gp_reg_mapping->gp_reg_help_0,
      1 );
  libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
  libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
  libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}

/**
 * Emits the end of the m loop: advances the C pointer (and B/A prefetch
 * pointers where the prefetch strategy requires it), repositions A — either by
 * rewinding a full k pass net of the m advance (i_k_unrolled == 0) or by
 * advancing one m block (k loop fully unrolled) — then compares the m loop
 * counter against i_m_done and jumps back while below. In batch-reduce address
 * mode every entry of the respective pointer array is updated instead of the
 * plain pointer.
 *
 * @param io_generated_code     generated-code container the instructions are appended to.
 * @param io_loop_label_tracker tracker holding the matching header label.
 * @param i_gp_reg_mapping      general-purpose register assignment.
 * @param i_micro_kernel_config active micro-kernel configuration.
 * @param i_xgemm_desc          GEMM descriptor (flags, prefetch, k, lda, datatype are read).
 * @param i_m_blocking          number of rows handled per m iteration.
 * @param i_m_done              m loop trip count compared against the counter.
 * @param i_k_unrolled          non-zero if the k loop was fully unrolled.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_m_blocking,
    const unsigned int i_m_done,
    const unsigned int i_k_unrolled ) {
  /* bytes by which the current m block advances a pointer */
  const unsigned int l_m_advance = i_m_blocking * (i_micro_kernel_config->datatype_size);
  /* bytes to rewind A after a complete (non-unrolled) k loop, net of the m advance */
  const unsigned int l_a_rewind = ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda)) - l_m_advance;
  /* advance C pointer; BF16 C elements occupy datatype_size/2 bytes */
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
        i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
        i_gp_reg_mapping->gp_reg_c, l_m_advance );
  }
  /* C prefetch */
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
        i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
#endif
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
          i_gp_reg_mapping->gp_reg_b_prefetch, l_m_advance );
    }
  }
  if (i_k_unrolled == 0) {
    /* A prefetch: rewind by a full k pass net of the m advance */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
        /* NOTE: only the pure-AL2 strategy updates the prefetch pointer array
           here (matches the original control flow) */
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
          libxsmm_generator_gemm_footer_mloop_adjust_ptrs( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping,
              i_micro_kernel_config, i_xgemm_desc, i_gp_reg_mapping->gp_reg_a_prefetch,
              i_micro_kernel_config->alu_sub_instruction, l_a_rewind );
        }
      } else {
        libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
            i_gp_reg_mapping->gp_reg_a_prefetch, l_a_rewind );
      }
    }
    /* advance A pointer: rewind each batch-reduce entry, or the plain pointer */
    if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
      libxsmm_generator_gemm_footer_mloop_adjust_ptrs( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping,
          i_micro_kernel_config, i_xgemm_desc, i_gp_reg_mapping->gp_reg_a,
          i_micro_kernel_config->alu_sub_instruction, l_a_rewind );
    } else {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
          i_gp_reg_mapping->gp_reg_a, l_a_rewind );
    }
  } else {
    /* k loop fully unrolled: pointers only need to advance by one m block */
    /* A prefetch */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      if ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS ) {
        libxsmm_generator_gemm_footer_mloop_adjust_ptrs( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping,
            i_micro_kernel_config, i_xgemm_desc, i_gp_reg_mapping->gp_reg_a_prefetch,
            i_micro_kernel_config->alu_add_instruction, l_m_advance );
      } else {
        libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
            i_gp_reg_mapping->gp_reg_a_prefetch, l_m_advance );
      }
    }
    /* advance A pointer */
    if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
      libxsmm_generator_gemm_footer_mloop_adjust_ptrs( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping,
          i_micro_kernel_config, i_xgemm_desc, i_gp_reg_mapping->gp_reg_a,
          i_micro_kernel_config->alu_add_instruction, l_m_advance );
    } else {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
          i_gp_reg_mapping->gp_reg_a, l_m_advance );
    }
  }
  /* loop condition: jump back while the m counter is below i_m_done */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
/**
 * Emits the instructions that initialize the C accumulator registers of a
 * GEMM micro-kernel.  With beta == 1 the accumulators are loaded from the C
 * matrix (with format-specific handling for i16->f32 and bf16 kernels);
 * with beta == 0 they are cleared via vxor.
 *
 * @param io_generated_code     buffer receiving the emitted instruction stream
 * @param i_gp_reg_mapping      GP-register roles (C pointer, prefetch regs, ...)
 * @param i_micro_kernel_config micro-kernel configuration (ISA, vector length, ...)
 * @param i_xgemm_desc          GEMM descriptor (flags, ldc, datatypes, ...)
 * @param i_m_blocking          register blocking in M, in elements
 * @param i_n_blocking          register blocking in N, in columns
 */
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config: number of vector registers needed to cover i_m_blocking elements (rounded up) */
l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* start register of accumulator: the accumulator block occupies the top i_n_blocking * l_m_blocking vector registers */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Do some test if it is possible to generate the requested code.
This is not done in release mode and therefore bad
things might happen.... HUAAH */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif /*!defined(NDEBUG)*/
/* load C accumulator */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
/* i16 input with f32 output: the accumulators are zeroed here; C itself is
* added later during the scaling step of store_C (via FMA) */
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* vxor reg,reg,reg zeroes the accumulator register */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
/* bf16 in/out: load 16-bit C values and widen them to f32 accumulators */
} else if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 16 bit values into ymm */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
/* convert 16 bit values into 32 bit (integer convert) */
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVSXWD,
i_micro_kernel_config->vector_name,
0, LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF);
/* shift 16 bits to the left to generate valid FP32 numbers */
libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPSLLD,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF,
16);
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* we only mask the last m-blocked load */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C, so let's xout accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
/**
 * Emits the instructions that write the accumulator block of a GEMM
 * micro-kernel back to the C matrix, including datatype-specific
 * post-processing (i16->f32 scaling via FMA/MUL, bf16 down-conversion)
 * and optional B-matrix prefetches issued alongside the stores.
 *
 * @param io_generated_code     buffer receiving the emitted instruction stream
 * @param i_gp_reg_mapping      GP-register roles (C pointer, prefetch regs, ...)
 * @param i_micro_kernel_config micro-kernel configuration (ISA, vector length, ...)
 * @param i_xgemm_desc          GEMM descriptor (flags, ldc, datatype, prefetch)
 * @param i_m_blocking          register blocking in M, in elements
 * @param i_n_blocking          register blocking in N, in columns
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking )
{
/* deriving register blocking from kernel config */
unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
/* start register of accumulator */
unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
/* select store instruction: non-temporal stores when the NTS hint is set */
unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;
/* @TODO fix this test */
#if !defined(NDEBUG)
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif
/* in case of IGEMM just do some potential conversion to FP */
/* let convert the int32 accumulator into a FP32 values */
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* load address of scaling factor from stack */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
48,
i_gp_reg_mapping->gp_reg_help_1,
0 );
/* broadcast scaling factor into a vector register */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
i_gp_reg_mapping->gp_reg_help_1,
LIBXSMM_X86_GP_REG_UNDEF, 0,
0,
i_micro_kernel_config->vector_name, 0,
0, 1, 0 );
/* loop over the accumulator, convert and scale */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* convert current accumulator register into FP32 */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTDQ2PS,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF );
/* scale it */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
/* acc = acc * scale + C (the beta=1 addition deferred from load_C) */
libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VFMADD213PS,
0,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF,
0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
0,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n));
} else {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMULPS,
i_micro_kernel_config->vector_name,
0,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
}
/* storing C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
/* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
}
}
} else if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
#if 0
/* push 0x7f800000 on the stack, naninf masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x7f800000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
/* push 0x00010000 on the stack, fixup masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00010000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
/* push 0x00007fff on the stack, rneadd */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00007fff);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
/* push 0x00000001 on the stack, fixup */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00000001);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
#endif
/* storing downconverted and rounded C accumulator */
/* CORE/CLX have no native f32->bf16 convert: truncate via shift + VPMOVDW */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPSRAD,
i_micro_kernel_config->vector_name,
reg_X,
reg_X,
LIBXSMM_X86_VEC_REG_UNDEF,
16);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVDW,
i_micro_kernel_config->vector_name,
reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
0,
LIBXSMM_X86_VEC_REG_UNDEF);
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
}
/* BUGFIX: this range check used "||", which is a tautology (every supported ISA
* is <= LIBXSMM_X86_ALLFEAT) and therefore routed pre-CPX AVX512 targets into
* the native bf16 conversion path below; "&&" selects the intended
* CPX..ALLFEAT range that supports VCVTNE2PS2BF16/VCVTNEPS2BF16 */
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
/* pairs of accumulators are converted with one VCVTNE2PS2BF16 each */
unsigned int l_m_2_blocking = (l_m_blocking/2)*2;
l_m = 0;
for ( ; l_m < l_m_2_blocking; l_m+=2 ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTNE2PS2BF16,
i_micro_kernel_config->vector_name,
reg_X, reg_X2,
0,
0);
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 0, 0, 1 );
}
/* odd tail accumulator, converted alone */
for ( ; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
0,
0);
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
}
} else {
/* storing C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
/* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
}
}
}
}
/**
 * Emits the code that materializes the AVX-512 remainder mask used for the
 * last partial m-block: the full-lane mask is shifted right by i_mask_count
 * ("inverse" remainder), moved into a GP register, and then transferred to
 * the dedicated mask register via KMOVW.
 *
 * @param io_generated_code buffer receiving the emitted instruction stream
 * @param i_gp_reg_tmp      scratch GP register used to stage the mask bits
 * @param i_xgemm_desc      GEMM descriptor (input datatype selects lane count)
 * @param i_mask_count      number of high lanes to disable
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code* io_generated_code,
const unsigned int i_gp_reg_tmp,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_mask_count ) {
/* full mask covers 8 lanes for FP64 input, 16 lanes otherwise */
const unsigned int l_full_mask =
( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ? 0xff : 0xffff;
/* drop the lanes that fall outside the remainder */
const unsigned int l_mask = l_full_mask >> i_mask_count;
/* stage the mask bits in the scratch GP register */
libxsmm_x86_instruction_alu_imm( io_generated_code,
LIBXSMM_X86_INSTR_MOVQ,
i_gp_reg_tmp,
l_mask );
if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
/* move the GP register into the AVX-512 mask register */
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVW,
i_gp_reg_tmp,
LIBXSMM_X86_AVX512_MASK );
} else {
/* mask registers require AVX-512; shouldn't happen */
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
return;
}
}
|
GB_binop__pow_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int64)
// C=scalar+B GB (_bind1st__pow_int64)
// C=scalar+B' GB (_bind1st_tran__pow_int64)
// C=A+scalar GB (_bind2nd__pow_int64)
// C=A'+scalar GB (_bind2nd_tran__pow_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_pow_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT64 || GxB_NO_POW_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template applies GB_BINOP (z = GB_pow_int64) to every entry
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// task slicing of B given by B_ek_slicing / B_ntasks / B_nthreads.
GrB_Info GB (_Cdense_accumB__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// dense subassign method 23: C(i,j) = binop (C(i,j), B(i,j)) for entries of B
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points at the scalar, already typecast to int64_t.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__pow_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
// dense subassign method 22: C(i,j) = binop (C(i,j), bwork) for all entries
#include "GB_dense_subassign_22_template.c"
}
// single exit point: the original had a second, unreachable
// "return (GrB_SUCCESS)" after one inside the scope above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, optionally masked (C<M> or C<!M>); the result pattern
// is the union of the patterns of A and B. Work is described by TaskList.
GrB_Info GB (_AaddB__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces used inside the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked or complement-masked)
// where the result C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since pow is not commutative, flipxy selects which operand
// order the template applies (GB_FLIPPED below).
GrB_Info GB (_AemultB_02__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_04__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is
// held in bitmap form.
GrB_Info GB (_AemultB_bitmap__pow_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument, i.e. Cx [p] = pow (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__pow_int64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs and output
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// apply the operator only where the bitmap says an entry exists
if (GBB (Bb, k))
{
int64_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_pow_int64 (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument, i.e. Cx [p] = pow (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__pow_int64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs and output
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// apply the operator only where the bitmap says an entry exists
if (GBB (Ab, k))
{
int64_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_pow_int64 (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument; uses the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__pow_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second argument; uses the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * Follows the classic GNU libc elapsed-time example: y is normalized in
 * place (whole seconds are carried between tv_sec and tv_usec) so that the
 * final tv_usec difference is guaranteed non-negative.
 *
 * Returns 1 if the difference is negative (x earlier than y), otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Carry whole seconds out of y so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Carry in the other direction if the usec gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* Form the difference; tv_usec is certainly positive after normalization. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's (normalized) seconds precede y's. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Grid dimensions (interior size + 2 halo cells) and time-step count.
   * Defaults cover the case where the command line omits them; the
   * original read Nx/Ny/Nz/Nt uninitialized in that case. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1], each an Nz x Ny x Nx array of doubles. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize BOTH planes over the FULL domain, boundaries included.
   * The stencil reads the i/j/k == 0 halo cells (A[t][i-1][...] at i==1)
   * and, from the second time step on, the halos of A[1] as well; the
   * original loops started at 1 and only touched A[0], leaving those
   * cells uninitialized (undefined behavior when read). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the stencil TESTS times and keep the best wall-clock time. */
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            /* order-1 7-point stencil: center + 6 face neighbors */
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUGFIX: the original called the undefined lowercase min(); use the
     * MIN macro defined at the top of this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
path.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : path.c
* Description : path drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_path_c__
#define __libaroma_path_c__
#include <aroma_internal.h>
#include <math.h>
/*
* Function : libaroma_path
* Return Value: LIBAROMA_PATHP
* Descriptions: create new path
*/
LIBAROMA_PATHP libaroma_path(float x, float y){
  /* Create a new path seeded with its first point (x, y). */
  LIBAROMA_PATHP path = (LIBAROMA_PATHP) calloc(sizeof(LIBAROMA_PATH),1);
  if (path==NULL){
    ALOGW("libaroma_path alloc LIBAROMA_PATHP failed");
    return NULL;
  }
  /* point storage grows in chunks of 32 entries */
  path->p = (LIBAROMA_PATH_POINTP) malloc(sizeof(LIBAROMA_PATH_POINT)*32);
  if (path->p==NULL){
    ALOGW("libaroma_path alloc path->p failed");
    free(path);
    return NULL;
  }
  path->n = 1;
  path->p[0].x = x;
  path->p[0].y = y;
  /* the bounding box collapses to the seed point */
  path->min.x = path->max.x = x;
  path->min.y = path->max.y = y;
  return path;
} /* End of libaroma_path */
/*
* Function : libaroma_path_free
* Return Value: byte
* Descriptions: free path
*/
byte libaroma_path_free(LIBAROMA_PATHP path){
  /* Release a path and its point storage; returns 0 on NULL input. */
  if (path==NULL){
    return 0;
  }
  /* free the point array first, then the header */
  if (path->p!=NULL){
    free(path->p);
  }
  free(path);
  return 1;
} /* End of libaroma_path_free */
/*
* Function : libaroma_path_add
* Return Value: byte
* Descriptions: add point into path
*/
byte libaroma_path_add(LIBAROMA_PATHP path, float x, float y){
  /* Append point (x, y) to the path; returns 0 on failure. */
  if ((path==NULL)||(path->p==NULL)){
    return 0;
  }
  /* capacity is always a multiple of 32: grow when the current count
     fills the last chunk exactly */
  if ((path->n % 32)==0){
    LIBAROMA_PATH_POINTP grown = (LIBAROMA_PATH_POINTP) realloc(
      path->p, sizeof(LIBAROMA_PATH_POINT)*(path->n+32)
    );
    if (grown==NULL){
      ALOGW("libaroma_path_add cannot realloc path->p");
      return 0;
    }
    path->p = grown;
  }
  path->p[path->n].x = x;
  path->p[path->n].y = y;
  path->n++;
  /* keep the cached bounding box in sync with the new point */
  path->max.x = MAX(path->max.x,x);
  path->max.y = MAX(path->max.y,y);
  path->min.x = MIN(path->min.x,x);
  path->min.y = MIN(path->min.y,y);
  return 1;
} /* End of libaroma_path_add */
/*
* Function : libaroma_path_curve_calc
* Return Value: void
* Descriptions: calculating bezier curve
*/
void libaroma_path_curve_calc(
  float t,
  float *x, float *y,
  float x0, float y0,
  float x1, float y1,
  float x2, float y2,
  float x3, float y3){
  /* Evaluate a cubic Bezier curve at parameter t, writing the result to
   * (*x, *y).  (x0,y0)..(x3,y3) are the four control points. */
  float u   = 1 - t;
  float tt  = t * t;
  float uu  = u * u;
  /* Bernstein basis weights for the four control points */
  float b0 = uu * u;        /* (1-t)^3       */
  float b1 = 3 * uu * t;    /* 3(1-t)^2 t    */
  float b2 = 3 * u * tt;    /* 3(1-t) t^2    */
  float b3 = tt * t;        /* t^3           */
  *x = b0 * x0;
  *x += b1 * x1;
  *x += b2 * x2;
  *x += b3 * x3;
  *y = b0 * y0;
  *y += b1 * y1;
  *y += b2 * y2;
  *y += b3 * y3;
} /* End of libaroma_path_curve_calc */
/*
* Function : _libaroma_path_curve_findpoint
* Return Value: byte
* Descriptions: find curve path points
*/
/* Recursively subdivide the Bezier span [t0, t1] and append sample points
 * to the path.  (xt0,yt0) / (xt1,yt1) are the already-evaluated curve
 * points at t0 and t1; subdivision stops once adjacent samples are less
 * than 2 pixels apart in both axes.  Returns 0 for a degenerate span. */
byte _libaroma_path_curve_findpoint(
  LIBAROMA_PATHP path,
  float t0, float t1,
  float x0, float y0,
  float x1, float y1,
  float x2, float y2,
  float x3, float y3,
  float xt0, float yt0,
  float xt1, float yt1
){
  if (t0==t1){
    return 0;
  }
  /* evaluate the curve at the midpoint parameter */
  float thalf = t0 + ((t1 - t0) / 2);
  float xt, yt;
  libaroma_path_curve_calc(thalf, &xt, &yt,x0,y0,x1,y1,x2,y2,x3,y3);
  /* BUGFIX: the original called the integer abs() on float distances,
   * truncating them to int before the >=2 comparison (e.g. a 1.9px gap
   * was treated as 1px and not subdivided); use fabsf() instead. */
  if ((fabsf(xt-xt0)>=2)||(fabsf(yt-yt0)>=2)) {
    /* left half still too coarse: refine [t0, thalf] first */
    _libaroma_path_curve_findpoint(
      path,t0,thalf,x0,y0,x1,y1,x2,y2,x3,y3,xt0,yt0,xt,yt);
  }
  libaroma_path_add(path, xt, yt);
  if ((fabsf(xt-xt1)>=2)||(fabsf(yt-yt1)>=2)) {
    /* right half still too coarse: refine [thalf, t1] */
    _libaroma_path_curve_findpoint(
      path,thalf,t1,x0,y0,x1,y1,x2,y2,x3,y3,xt,yt,xt1,yt1);
  }
  libaroma_path_add(path, xt1, yt1);
  return 1;
} /* End of _libaroma_path_curve_findpoint */
/*
* Function : libaroma_path_curve
* Return Value: byte
* Descriptions: add curve point
*/
/* Append a cubic Bezier segment from the path's current end point to
 * (x3, y3), with (x1,y1)/(x2,y2) as control points.  resolution < 1
 * selects adaptive subdivision; otherwise exactly `resolution` samples
 * are evaluated.  Returns 0 on invalid path. */
byte libaroma_path_curve(
  LIBAROMA_PATHP path,
  int resolution,
  float x1, float y1,
  float x2, float y2,
  float x3, float y3
){
  if (!path){
    return 0;
  }
  if (!path->p){
    return 0;
  }
  if (resolution<1){
    /* dynamic hi res curve calculation */
    float x0 = path->p[path->n-1].x;
    float y0 = path->p[path->n-1].y;
    _libaroma_path_curve_findpoint(
      path,0,1,x0,y0,x1,y1,x2,y2,x3,y3,x0,y0,x3,y3);
  }
  else{
    /* fixed resolution */
    int i;
    float x0 = path->p[path->n-1].x;
    float y0 = path->p[path->n-1].y;
    int px = round(x0);
    int py = round(y0);
    /* BUGFIX: with resolution==1 the original computed i/(resolution-1),
     * a float division by zero (NaN t); clamp the divisor so t stays 0. */
    float tdiv = (resolution>1) ? ((float)(resolution-1)) : 1.0f;
    for(i=0;i<resolution;i++){
      float x, y;
      float t = i / tdiv;
      libaroma_path_curve_calc(t,&x,&y,x0,y0,x1,y1,x2,y2,x3,y3);
      int rx = round(x);
      int ry = round(y);
      /* NOTE(review): px/py are never updated inside the loop, so this
       * only skips samples landing on the START pixel — looks intentional
       * for dropping the duplicated first point, but verify. */
      if ((px!=rx)||(py!=ry)){
        libaroma_path_add(path, x, y);
      }
    }
  }
  return 1;
} /* End of libaroma_path_curve */
/*
* Function : libaroma_path_draw
* Return Value: byte
* Descriptions: draw path
*/
/* Scanline-fill the polygon described by `path` onto canvas `dest`.
 * is_mask==0: blend `color` at `alpha` into the color plane.
 * is_mask==1: ADD coverage into the canvas alpha plane.
 * is_mask==2: SUBTRACT (erase) coverage from the alpha plane.
 * aliasing in (0,1]: sub-scanline step for anti-aliasing (1 = off). */
byte libaroma_path_draw(
LIBAROMA_CANVASP dest,
LIBAROMA_PATHP path,
word color,
byte alpha,
byte is_mask,
float aliasing){
/* default target: the framebuffer canvas */
if (!dest){
dest=libaroma_fb()->canvas;
}
/* mask modes require the target to have an alpha plane */
if ((is_mask)&&(dest->alpha==NULL)){
return 0;
}
if (!path){
return 0;
}
/* fully transparent color fill: nothing to draw */
if ((!is_mask)&&(alpha<1)){
return 1;
}
/* clamp aliasing into (0,1] */
if (aliasing<=0){
aliasing=1;
}
if (aliasing>1){
aliasing=1;
}
/* fill */
if (path->n>1){
/* clip the path bounding box against the canvas */
int miny = MAX(0,floor(path->min.y));
int maxy = MIN(dest->h-1,ceil(path->max.y));
int minx = MAX(0,floor(path->min.x));
int dwidth = MIN(dest->w,ceil(path->max.x))-minx;
if (dwidth<1){
return 1;
}
/* alias_sz = sub-scanlines per row; alphaaa = per-sub-scanline alpha
   contribution so the full-coverage sum reaches alpha (or 255 when
   erasing a mask) */
float alias_sz = 1/aliasing;
byte alphaaa=alpha*aliasing;
if (is_mask==2){
alphaaa=255*aliasing;
}
int py=0;
/* loop through the rows of the image. */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (py=miny;py<=maxy;py++) {
bytep line=NULL;
if (is_mask){
/* mask modes write directly into the canvas alpha plane */
line = dest->alpha + py * dest->l + minx;
}
else{
/* color mode: per-row coverage buffer, blended at end of row */
line = calloc(dwidth,1);
}
/* x positions where this scanline crosses a polygon edge */
float * nodes = (float *) malloc(sizeof(float) * path->n);
int pyn;
for (pyn=0;pyn<alias_sz;pyn++){
/* sub-scanline y coordinate within the pixel row */
float fy = ((float) py)+(((float) pyn)*aliasing);
int i, n=0, j=path->n-1;
/* find nodes */
for (i=0;i<path->n;i++){
if (
((path->p[i].y<fy)&&(path->p[j].y>=fy))||
((path->p[j].y<fy)&&(path->p[i].y>=fy))
){
/* x of edge (j -> i) intersected with scanline fy, relative to minx */
nodes[n++] = (
(path->p[i].x+(fy-path->p[i].y)/(path->p[j].y-path->p[i].y)*
(path->p[j].x-path->p[i].x))) - ((float) minx);
}
j = i;
}
/* there is nodes */
if (n>1){
/* sort crossings left-to-right (gnome-sort style: swap and step back) */
i=0;
while (i<n-1){
if (nodes[i]>nodes[i+1]){
float tmp=nodes[i];
nodes[i]=nodes[i+1];
nodes[i+1]=tmp;
if (i>0){
i--;
}
}
else{
i++;
}
}
/* process alpha values */
/* consecutive node pairs (even/odd) bound the interior spans */
for (i=0;i<n;i+=2){
if (nodes[i]>=dwidth){
break;
}
if (nodes[i+1]>0){
/* clip the span to [0, dwidth] */
if (nodes[i]<0){
nodes[i]=0;
}
if (nodes[i+1]>dwidth){
nodes[i+1]=dwidth;
}
}
else{
continue;
}
if (nodes[i+1]-nodes[i]<1){
continue;
}
if (aliasing==1){
/* no anti-aliasing: one solid memset per span */
int linex=(int) floor(nodes[i]);
int linew=((int) floor(nodes[i+1]))-linex;
memset(line+linex,alpha,linew);
}
else{
int px;
/* left & right aliasing */
int linex=floor(nodes[i]);
int linerx=floor(nodes[i+1]);
if (is_mask!=2){
/* accumulate fractional coverage at the two edge pixels */
line[linex]=
MIN(255,line[linex]+(1.0-fmod(nodes[i],1))*alphaaa);
line[linerx]=
MIN(255,line[linerx]+fmod(nodes[i+1],1)*alphaaa);
}
else{
/* erase mode: subtract fractional coverage instead */
line[linex]=
MAX(0,((int) line[linex])-(1.0-fmod(nodes[i],1))*alphaaa);
line[linerx]=
MAX(0,((int) line[linerx])-fmod(nodes[i+1],1)*alphaaa);
}
linex++;
int linew=linerx-linex;
if (linew<1){
continue;
}
bytep cline=line+linex;
int left=linew;
#ifdef __ARM_NEON__
/* NEON: saturating add/subtract of the span interior, 8 px at a time;
   `left` keeps the scalar remainder */
left=linew%8;
if (linew>=8){
uint8x8_t ro = vmov_n_u8(alphaaa);
if (is_mask!=2){
uint16x8_t v255 = vdupq_n_u16(alpha);
for (px=0;px<linew-left;px+=8) {
uint8x8_t op = vld1_u8(cline+px);
vst1_u8(cline+px,
vmovn_u16(vminq_u16(vaddl_u8(op, ro),v255)));
}
}
else{
uint8x8_t v0 = vmov_n_u8(0);
for (px=0;px<linew-left;px+=8) {
uint8x8_t op = vld1_u8(cline+px);
vst1_u8(cline+px, vmax_u8(vsub_u8(op,ro),v0));
}
}
}
#endif
/* scalar tail (or the whole span when NEON is unavailable) */
if (is_mask!=2){
for (px=linew-left;px<linew;px++){
cline[px]=MIN(alpha,cline[px]+alphaaa);
}
}
else{
for (px=linew-left;px<linew;px++){
cline[px]=MAX(0,((int) cline[px])-alphaaa);
}
}
}
}
}
}
free(nodes);
if (!is_mask){
/* process */
/* blend the accumulated coverage buffer into the color canvas */
if (line!=NULL){
wordp color_line = dest->data + py * dest->l + minx;
libaroma_alpha_mono(dwidth,color_line,color_line,color,line);
free(line);
}
}
}
}
return 1;
} /* End of libaroma_path_draw */
#endif /* __libaroma_path_c__ */
|
openmp_control.c | /**
*
* @file runtime_control.c
*
* @copyright 2009-2014 The University of Tennessee and The University of
* Tennessee Research Foundation. All rights reserved.
* @copyright 2012-2017 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
* Univ. Bordeaux. All rights reserved.
* @copyright 2018 King Abdullah University of Science and Technology (KAUST).
* All rights reserved.
*
***
* @brief AL4SAN OpenMP control routines
*
* AL4SAN is a software package provided by King Abdullah University of Science and Technology (KAUST)
*
*
* version 1.1.0
* author Vijay Joshi
* author Cedric Castagnede
* date 2012-09-15
*
* @version 1.1.0
* @author Rabab Alomairy
* @date 2019-02-06
*/
#include <stdio.h>
#include <stdlib.h>
#include "al4san_openmp.h"
/*******************************************************************************
* Initialize AL4SAN
**/
int AL4SAN_Openmp_init(AL4SAN_context_t *al4san,
                       int ncpus,
                       int ncudas,
                       int nthreads_per_worker)
{
    /* GPU workers and multi-threaded kernels are not available with the
     * OpenMP backend; warn if the caller requested them. */
    if ( ncudas > 0 ) {
        al4san_warning( "AL4SAN_Openmp_init_scheduler(OpenMP)", "GPUs are not supported for now");
    }
    if ( nthreads_per_worker > 0 ) {
        al4san_warning( "AL4SAN_Openmp_init_scheduler(OpenMP)", "Multi-threaded kernels are not supported for now");
    }
    /* one OpenMP thread per requested CPU */
    omp_set_num_threads(ncpus);
    al4san->world_size = ncpus;
    al4san->parallel_enabled = AL4SAN_TRUE;
    return 0;
}
/*******************************************************************************
* Finalize AL4SAN
*/
void AL4SAN_Openmp_finalize(AL4SAN_context_t *al4san)
{
    (void)al4san;
    /* drain every outstanding task before shutting the runtime down */
    #pragma omp taskwait
}
/*******************************************************************************
* To suspend the processing of new tasks by workers
**/
void AL4SAN_Openmp_pause( AL4SAN_context_t *al4san )
{
    /* no-op for the OpenMP backend: workers cannot be suspended */
    (void)al4san;
}
/*******************************************************************************
* This is the symmetrical call to AL4SAN_runtime_pause,
* used to resume the workers polling for new tasks.
**/
void AL4SAN_Openmp_resume( AL4SAN_context_t *al4san )
{
    /* no-op: counterpart of AL4SAN_Openmp_pause, which does nothing */
    (void)al4san;
}
/*******************************************************************************
* Barrier AL4SAN.
**/
void AL4SAN_Openmp_barrier(AL4SAN_context_t *al4san)
{
    (void)al4san;
    /* synchronize all threads of the current OpenMP team */
    #pragma omp barrier
}
//I added it, don't forget to add it to runrime.h
void AL4SAN_Openmp_barrier_on(AL4SAN_context_t *al4san, double *ptr)
{
    /* ptr is accepted for interface symmetry with other backends but is
     * not used here: the barrier synchronizes the whole team */
    (void)al4san;
    (void)ptr;
    #pragma omp barrier
}
/**
* Display a progress information when executing the tasks
*/
void AL4SAN_Openmp_progress( AL4SAN_context_t *al4san )
{
    /* no-op: the OpenMP runtime exposes no progress reporting hook */
    (void)al4san;
}
/*******************************************************************************
* Thread rank.
**/
int AL4SAN_Openmp_thread_rank(AL4SAN_context_t *al4san)
{
    (void)al4san;
    /* rank of the calling thread within the current team */
    return omp_get_thread_num();
}
/**
* Number of threads.
*/
int AL4SAN_Openmp_thread_size( AL4SAN_context_t *al4san )
{
    int nworkers = 1;
    (void)al4san;
    /* omp_get_num_threads() reports 1 outside a parallel region, so a
     * region is opened just to observe the actual team size */
    #pragma omp parallel
    {
        nworkers = omp_get_num_threads();
    }
    return nworkers;
}
/**
* The process rank
*/
int AL4SAN_Openmp_comm_rank( AL4SAN_context_t *al4san )
{
    (void)al4san;
    /* shared-memory backend: the thread id doubles as the "process" rank */
    return omp_get_thread_num();
}
/**
* This returns the size of the distributed computation
*/
int AL4SAN_Openmp_comm_size( AL4SAN_context_t *al4san )
{
    /* single shared-memory node: the distributed "world" is always 1 */
    (void)al4san;
    return 1;
}
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __aarch64__
const int w = bottom_blob.w;
#endif
const int outw = top_blob.w;
const int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
#if __aarch64__
float* outptr1 = out.row(1);
const float* r5 = img0.row(5);
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum02 = _bias0;
float32x4_t _sum03 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _sum12 = _bias0;
float32x4_t _sum13 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
_sum02 = vmlaq_f32(_sum02, _k00, _r02);
_sum02 = vmlaq_f32(_sum02, _k01, _r03);
_sum02 = vmlaq_f32(_sum02, _k02, _r04);
_sum02 = vmlaq_f32(_sum02, _k03, _r05);
_sum02 = vmlaq_f32(_sum02, _k04, _r06);
_sum03 = vmlaq_f32(_sum03, _k00, _r03);
_sum03 = vmlaq_f32(_sum03, _k01, _r04);
_sum03 = vmlaq_f32(_sum03, _k02, _r05);
_sum03 = vmlaq_f32(_sum03, _k03, _r06);
_sum03 = vmlaq_f32(_sum03, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _r16 = vld1q_f32(r1 + 24);
float32x4_t _r17 = vld1q_f32(r1 + 28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum12 = vmlaq_f32(_sum12, _k00, _r12);
_sum12 = vmlaq_f32(_sum12, _k01, _r13);
_sum12 = vmlaq_f32(_sum12, _k02, _r14);
_sum12 = vmlaq_f32(_sum12, _k03, _r15);
_sum12 = vmlaq_f32(_sum12, _k04, _r16);
_sum13 = vmlaq_f32(_sum13, _k00, _r13);
_sum13 = vmlaq_f32(_sum13, _k01, _r14);
_sum13 = vmlaq_f32(_sum13, _k02, _r15);
_sum13 = vmlaq_f32(_sum13, _k03, _r16);
_sum13 = vmlaq_f32(_sum13, _k04, _r17);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
_sum02 = vmlaq_f32(_sum02, _k10, _r12);
_sum02 = vmlaq_f32(_sum02, _k11, _r13);
_sum02 = vmlaq_f32(_sum02, _k12, _r14);
_sum02 = vmlaq_f32(_sum02, _k13, _r15);
_sum02 = vmlaq_f32(_sum02, _k14, _r16);
_sum03 = vmlaq_f32(_sum03, _k10, _r13);
_sum03 = vmlaq_f32(_sum03, _k11, _r14);
_sum03 = vmlaq_f32(_sum03, _k12, _r15);
_sum03 = vmlaq_f32(_sum03, _k13, _r16);
_sum03 = vmlaq_f32(_sum03, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _r26 = vld1q_f32(r2 + 24);
float32x4_t _r27 = vld1q_f32(r2 + 28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum12 = vmlaq_f32(_sum12, _k10, _r22);
_sum12 = vmlaq_f32(_sum12, _k11, _r23);
_sum12 = vmlaq_f32(_sum12, _k12, _r24);
_sum12 = vmlaq_f32(_sum12, _k13, _r25);
_sum12 = vmlaq_f32(_sum12, _k14, _r26);
_sum13 = vmlaq_f32(_sum13, _k10, _r23);
_sum13 = vmlaq_f32(_sum13, _k11, _r24);
_sum13 = vmlaq_f32(_sum13, _k12, _r25);
_sum13 = vmlaq_f32(_sum13, _k13, _r26);
_sum13 = vmlaq_f32(_sum13, _k14, _r27);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
_sum02 = vmlaq_f32(_sum02, _k20, _r22);
_sum02 = vmlaq_f32(_sum02, _k21, _r23);
_sum02 = vmlaq_f32(_sum02, _k22, _r24);
_sum02 = vmlaq_f32(_sum02, _k23, _r25);
_sum02 = vmlaq_f32(_sum02, _k24, _r26);
_sum03 = vmlaq_f32(_sum03, _k20, _r23);
_sum03 = vmlaq_f32(_sum03, _k21, _r24);
_sum03 = vmlaq_f32(_sum03, _k22, _r25);
_sum03 = vmlaq_f32(_sum03, _k23, _r26);
_sum03 = vmlaq_f32(_sum03, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _r36 = vld1q_f32(r3 + 24);
float32x4_t _r37 = vld1q_f32(r3 + 28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum12 = vmlaq_f32(_sum12, _k20, _r32);
_sum12 = vmlaq_f32(_sum12, _k21, _r33);
_sum12 = vmlaq_f32(_sum12, _k22, _r34);
_sum12 = vmlaq_f32(_sum12, _k23, _r35);
_sum12 = vmlaq_f32(_sum12, _k24, _r36);
_sum13 = vmlaq_f32(_sum13, _k20, _r33);
_sum13 = vmlaq_f32(_sum13, _k21, _r34);
_sum13 = vmlaq_f32(_sum13, _k22, _r35);
_sum13 = vmlaq_f32(_sum13, _k23, _r36);
_sum13 = vmlaq_f32(_sum13, _k24, _r37);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
_sum02 = vmlaq_f32(_sum02, _k30, _r32);
_sum02 = vmlaq_f32(_sum02, _k31, _r33);
_sum02 = vmlaq_f32(_sum02, _k32, _r34);
_sum02 = vmlaq_f32(_sum02, _k33, _r35);
_sum02 = vmlaq_f32(_sum02, _k34, _r36);
_sum03 = vmlaq_f32(_sum03, _k30, _r33);
_sum03 = vmlaq_f32(_sum03, _k31, _r34);
_sum03 = vmlaq_f32(_sum03, _k32, _r35);
_sum03 = vmlaq_f32(_sum03, _k33, _r36);
_sum03 = vmlaq_f32(_sum03, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _r46 = vld1q_f32(r4 + 24);
float32x4_t _r47 = vld1q_f32(r4 + 28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum12 = vmlaq_f32(_sum12, _k30, _r42);
_sum12 = vmlaq_f32(_sum12, _k31, _r43);
_sum12 = vmlaq_f32(_sum12, _k32, _r44);
_sum12 = vmlaq_f32(_sum12, _k33, _r45);
_sum12 = vmlaq_f32(_sum12, _k34, _r46);
_sum13 = vmlaq_f32(_sum13, _k30, _r43);
_sum13 = vmlaq_f32(_sum13, _k31, _r44);
_sum13 = vmlaq_f32(_sum13, _k32, _r45);
_sum13 = vmlaq_f32(_sum13, _k33, _r46);
_sum13 = vmlaq_f32(_sum13, _k34, _r47);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
_sum02 = vmlaq_f32(_sum02, _k40, _r42);
_sum02 = vmlaq_f32(_sum02, _k41, _r43);
_sum02 = vmlaq_f32(_sum02, _k42, _r44);
_sum02 = vmlaq_f32(_sum02, _k43, _r45);
_sum02 = vmlaq_f32(_sum02, _k44, _r46);
_sum03 = vmlaq_f32(_sum03, _k40, _r43);
_sum03 = vmlaq_f32(_sum03, _k41, _r44);
_sum03 = vmlaq_f32(_sum03, _k42, _r45);
_sum03 = vmlaq_f32(_sum03, _k43, _r46);
_sum03 = vmlaq_f32(_sum03, _k44, _r47);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5 + 4);
float32x4_t _r52 = vld1q_f32(r5 + 8);
float32x4_t _r53 = vld1q_f32(r5 + 12);
float32x4_t _r54 = vld1q_f32(r5 + 16);
float32x4_t _r55 = vld1q_f32(r5 + 20);
float32x4_t _r56 = vld1q_f32(r5 + 24);
float32x4_t _r57 = vld1q_f32(r5 + 28);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
_sum12 = vmlaq_f32(_sum12, _k40, _r52);
_sum12 = vmlaq_f32(_sum12, _k41, _r53);
_sum12 = vmlaq_f32(_sum12, _k42, _r54);
_sum12 = vmlaq_f32(_sum12, _k43, _r55);
_sum12 = vmlaq_f32(_sum12, _k44, _r56);
_sum13 = vmlaq_f32(_sum13, _k40, _r53);
_sum13 = vmlaq_f32(_sum13, _k41, _r54);
_sum13 = vmlaq_f32(_sum13, _k42, _r55);
_sum13 = vmlaq_f32(_sum13, _k43, _r56);
_sum13 = vmlaq_f32(_sum13, _k44, _r57);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0 + 4, _sum01);
vst1q_f32(outptr0 + 8, _sum02);
vst1q_f32(outptr0 + 12, _sum03);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1 + 4, _sum11);
vst1q_f32(outptr1 + 8, _sum12);
vst1q_f32(outptr1 + 12, _sum13);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
r5 += 16;
outptr0 += 16;
outptr1 += 16;
}
for (; j + 1 < outw; j += 2)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5 + 4);
float32x4_t _r52 = vld1q_f32(r5 + 8);
float32x4_t _r53 = vld1q_f32(r5 + 12);
float32x4_t _r54 = vld1q_f32(r5 + 16);
float32x4_t _r55 = vld1q_f32(r5 + 20);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0 + 4, _sum01);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1 + 4, _sum11);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
outptr0 += 8;
outptr1 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k00, _r10);
_sum1 = vmlaq_f32(_sum1, _k01, _r11);
_sum1 = vmlaq_f32(_sum1, _k02, _r12);
_sum1 = vmlaq_f32(_sum1, _k03, _r13);
_sum1 = vmlaq_f32(_sum1, _k04, _r14);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k10, _r20);
_sum1 = vmlaq_f32(_sum1, _k11, _r21);
_sum1 = vmlaq_f32(_sum1, _k12, _r22);
_sum1 = vmlaq_f32(_sum1, _k13, _r23);
_sum1 = vmlaq_f32(_sum1, _k14, _r24);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k20, _r30);
_sum1 = vmlaq_f32(_sum1, _k21, _r31);
_sum1 = vmlaq_f32(_sum1, _k22, _r32);
_sum1 = vmlaq_f32(_sum1, _k23, _r33);
_sum1 = vmlaq_f32(_sum1, _k24, _r34);
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum1 = vmlaq_f32(_sum1, _k30, _r40);
_sum1 = vmlaq_f32(_sum1, _k31, _r41);
_sum1 = vmlaq_f32(_sum1, _k32, _r42);
_sum1 = vmlaq_f32(_sum1, _k33, _r43);
_sum1 = vmlaq_f32(_sum1, _k34, _r44);
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5 + 4);
float32x4_t _r52 = vld1q_f32(r5 + 8);
float32x4_t _r53 = vld1q_f32(r5 + 12);
float32x4_t _r54 = vld1q_f32(r5 + 16);
_sum1 = vmlaq_f32(_sum1, _k40, _r50);
_sum1 = vmlaq_f32(_sum1, _k41, _r51);
_sum1 = vmlaq_f32(_sum1, _k42, _r52);
_sum1 = vmlaq_f32(_sum1, _k43, _r53);
_sum1 = vmlaq_f32(_sum1, _k44, _r54);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
outptr0 += 4;
outptr1 += 4;
}
r0 += 4 * 4 + w * 4;
r1 += 4 * 4 + w * 4;
r2 += 4 * 4 + w * 4;
r3 += 4 * 4 + w * 4;
r4 += 4 * 4 + w * 4;
r5 += 4 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
_sum2 = vmlaq_f32(_sum2, _k00, _r02);
_sum2 = vmlaq_f32(_sum2, _k01, _r03);
_sum2 = vmlaq_f32(_sum2, _k02, _r04);
_sum2 = vmlaq_f32(_sum2, _k03, _r05);
_sum2 = vmlaq_f32(_sum2, _k04, _r06);
_sum3 = vmlaq_f32(_sum3, _k00, _r03);
_sum3 = vmlaq_f32(_sum3, _k01, _r04);
_sum3 = vmlaq_f32(_sum3, _k02, _r05);
_sum3 = vmlaq_f32(_sum3, _k03, _r06);
_sum3 = vmlaq_f32(_sum3, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _r16 = vld1q_f32(r1 + 24);
float32x4_t _r17 = vld1q_f32(r1 + 28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
_sum2 = vmlaq_f32(_sum2, _k10, _r12);
_sum2 = vmlaq_f32(_sum2, _k11, _r13);
_sum2 = vmlaq_f32(_sum2, _k12, _r14);
_sum2 = vmlaq_f32(_sum2, _k13, _r15);
_sum2 = vmlaq_f32(_sum2, _k14, _r16);
_sum3 = vmlaq_f32(_sum3, _k10, _r13);
_sum3 = vmlaq_f32(_sum3, _k11, _r14);
_sum3 = vmlaq_f32(_sum3, _k12, _r15);
_sum3 = vmlaq_f32(_sum3, _k13, _r16);
_sum3 = vmlaq_f32(_sum3, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _r26 = vld1q_f32(r2 + 24);
float32x4_t _r27 = vld1q_f32(r2 + 28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
_sum2 = vmlaq_f32(_sum2, _k20, _r22);
_sum2 = vmlaq_f32(_sum2, _k21, _r23);
_sum2 = vmlaq_f32(_sum2, _k22, _r24);
_sum2 = vmlaq_f32(_sum2, _k23, _r25);
_sum2 = vmlaq_f32(_sum2, _k24, _r26);
_sum3 = vmlaq_f32(_sum3, _k20, _r23);
_sum3 = vmlaq_f32(_sum3, _k21, _r24);
_sum3 = vmlaq_f32(_sum3, _k22, _r25);
_sum3 = vmlaq_f32(_sum3, _k23, _r26);
_sum3 = vmlaq_f32(_sum3, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _r36 = vld1q_f32(r3 + 24);
float32x4_t _r37 = vld1q_f32(r3 + 28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
_sum2 = vmlaq_f32(_sum2, _k30, _r32);
_sum2 = vmlaq_f32(_sum2, _k31, _r33);
_sum2 = vmlaq_f32(_sum2, _k32, _r34);
_sum2 = vmlaq_f32(_sum2, _k33, _r35);
_sum2 = vmlaq_f32(_sum2, _k34, _r36);
_sum3 = vmlaq_f32(_sum3, _k30, _r33);
_sum3 = vmlaq_f32(_sum3, _k31, _r34);
_sum3 = vmlaq_f32(_sum3, _k32, _r35);
_sum3 = vmlaq_f32(_sum3, _k33, _r36);
_sum3 = vmlaq_f32(_sum3, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _r46 = vld1q_f32(r4 + 24);
float32x4_t _r47 = vld1q_f32(r4 + 28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
_sum2 = vmlaq_f32(_sum2, _k40, _r42);
_sum2 = vmlaq_f32(_sum2, _k41, _r43);
_sum2 = vmlaq_f32(_sum2, _k42, _r44);
_sum2 = vmlaq_f32(_sum2, _k43, _r45);
_sum2 = vmlaq_f32(_sum2, _k44, _r46);
_sum3 = vmlaq_f32(_sum3, _k40, _r43);
_sum3 = vmlaq_f32(_sum3, _k41, _r44);
_sum3 = vmlaq_f32(_sum3, _k42, _r45);
_sum3 = vmlaq_f32(_sum3, _k43, _r46);
_sum3 = vmlaq_f32(_sum3, _k44, _r47);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
vst1q_f32(outptr0 + 8, _sum2);
vst1q_f32(outptr0 + 12, _sum3);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
outptr0 += 16;
}
for (; j + 1 < outw; j += 2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
outptr0 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
outptr0 += 4;
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _r08 = vld1q_f32(r0 + 32);
float32x4_t _r09 = vld1q_f32(r0 + 36);
float32x4_t _r010 = vld1q_f32(r0 + 40);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
_sum2 = vmlaq_f32(_sum2, _k00, _r04);
_sum2 = vmlaq_f32(_sum2, _k01, _r05);
_sum2 = vmlaq_f32(_sum2, _k02, _r06);
_sum2 = vmlaq_f32(_sum2, _k03, _r07);
_sum2 = vmlaq_f32(_sum2, _k04, _r08);
_sum3 = vmlaq_f32(_sum3, _k00, _r06);
_sum3 = vmlaq_f32(_sum3, _k01, _r07);
_sum3 = vmlaq_f32(_sum3, _k02, _r08);
_sum3 = vmlaq_f32(_sum3, _k03, _r09);
_sum3 = vmlaq_f32(_sum3, _k04, _r010);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _r16 = vld1q_f32(r1 + 24);
float32x4_t _r17 = vld1q_f32(r1 + 28);
float32x4_t _r18 = vld1q_f32(r1 + 32);
float32x4_t _r19 = vld1q_f32(r1 + 36);
float32x4_t _r110 = vld1q_f32(r1 + 40);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
_sum2 = vmlaq_f32(_sum2, _k10, _r14);
_sum2 = vmlaq_f32(_sum2, _k11, _r15);
_sum2 = vmlaq_f32(_sum2, _k12, _r16);
_sum2 = vmlaq_f32(_sum2, _k13, _r17);
_sum2 = vmlaq_f32(_sum2, _k14, _r18);
_sum3 = vmlaq_f32(_sum3, _k10, _r16);
_sum3 = vmlaq_f32(_sum3, _k11, _r17);
_sum3 = vmlaq_f32(_sum3, _k12, _r18);
_sum3 = vmlaq_f32(_sum3, _k13, _r19);
_sum3 = vmlaq_f32(_sum3, _k14, _r110);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _r26 = vld1q_f32(r2 + 24);
float32x4_t _r27 = vld1q_f32(r2 + 28);
float32x4_t _r28 = vld1q_f32(r2 + 32);
float32x4_t _r29 = vld1q_f32(r2 + 36);
float32x4_t _r210 = vld1q_f32(r2 + 40);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
_sum2 = vmlaq_f32(_sum2, _k20, _r24);
_sum2 = vmlaq_f32(_sum2, _k21, _r25);
_sum2 = vmlaq_f32(_sum2, _k22, _r26);
_sum2 = vmlaq_f32(_sum2, _k23, _r27);
_sum2 = vmlaq_f32(_sum2, _k24, _r28);
_sum3 = vmlaq_f32(_sum3, _k20, _r26);
_sum3 = vmlaq_f32(_sum3, _k21, _r27);
_sum3 = vmlaq_f32(_sum3, _k22, _r28);
_sum3 = vmlaq_f32(_sum3, _k23, _r29);
_sum3 = vmlaq_f32(_sum3, _k24, _r210);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _r36 = vld1q_f32(r3 + 24);
float32x4_t _r37 = vld1q_f32(r3 + 28);
float32x4_t _r38 = vld1q_f32(r3 + 32);
float32x4_t _r39 = vld1q_f32(r3 + 36);
float32x4_t _r310 = vld1q_f32(r3 + 40);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
_sum2 = vmlaq_f32(_sum2, _k30, _r34);
_sum2 = vmlaq_f32(_sum2, _k31, _r35);
_sum2 = vmlaq_f32(_sum2, _k32, _r36);
_sum2 = vmlaq_f32(_sum2, _k33, _r37);
_sum2 = vmlaq_f32(_sum2, _k34, _r38);
_sum3 = vmlaq_f32(_sum3, _k30, _r36);
_sum3 = vmlaq_f32(_sum3, _k31, _r37);
_sum3 = vmlaq_f32(_sum3, _k32, _r38);
_sum3 = vmlaq_f32(_sum3, _k33, _r39);
_sum3 = vmlaq_f32(_sum3, _k34, _r310);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _r46 = vld1q_f32(r4 + 24);
float32x4_t _r47 = vld1q_f32(r4 + 28);
float32x4_t _r48 = vld1q_f32(r4 + 32);
float32x4_t _r49 = vld1q_f32(r4 + 36);
float32x4_t _r410 = vld1q_f32(r4 + 40);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
_sum2 = vmlaq_f32(_sum2, _k40, _r44);
_sum2 = vmlaq_f32(_sum2, _k41, _r45);
_sum2 = vmlaq_f32(_sum2, _k42, _r46);
_sum2 = vmlaq_f32(_sum2, _k43, _r47);
_sum2 = vmlaq_f32(_sum2, _k44, _r48);
_sum3 = vmlaq_f32(_sum3, _k40, _r46);
_sum3 = vmlaq_f32(_sum3, _k41, _r47);
_sum3 = vmlaq_f32(_sum3, _k42, _r48);
_sum3 = vmlaq_f32(_sum3, _k43, _r49);
_sum3 = vmlaq_f32(_sum3, _k44, _r410);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
vst1q_f32(outptr0 + 8, _sum2);
vst1q_f32(outptr0 + 12, _sum3);
r0 += 8 * 4;
r1 += 8 * 4;
r2 += 8 * 4;
r3 += 8 * 4;
r4 += 8 * 4;
outptr0 += 16;
}
for (; j + 1 < outw; j += 2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r15 = vld1q_f32(r1 + 20);
float32x4_t _r16 = vld1q_f32(r1 + 24);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _r25 = vld1q_f32(r2 + 20);
float32x4_t _r26 = vld1q_f32(r2 + 24);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _r35 = vld1q_f32(r3 + 20);
float32x4_t _r36 = vld1q_f32(r3 + 24);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _r45 = vld1q_f32(r4 + 20);
float32x4_t _r46 = vld1q_f32(r4 + 24);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
outptr0 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k03 = vld1q_f32(k0 + 12);
float32x4_t _k04 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0 + 4);
float32x4_t _k12 = vld1q_f32(k0 + 8);
float32x4_t _k13 = vld1q_f32(k0 + 12);
float32x4_t _k14 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0 + 4);
float32x4_t _k22 = vld1q_f32(k0 + 8);
float32x4_t _k23 = vld1q_f32(k0 + 12);
float32x4_t _k24 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _r34 = vld1q_f32(r3 + 16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0 + 4);
float32x4_t _k32 = vld1q_f32(k0 + 8);
float32x4_t _k33 = vld1q_f32(k0 + 12);
float32x4_t _k34 = vld1q_f32(k0 + 16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4 + 4);
float32x4_t _r42 = vld1q_f32(r4 + 8);
float32x4_t _r43 = vld1q_f32(r4 + 12);
float32x4_t _r44 = vld1q_f32(r4 + 16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0 + 4);
float32x4_t _k42 = vld1q_f32(k0 + 8);
float32x4_t _k43 = vld1q_f32(k0 + 12);
float32x4_t _k44 = vld1q_f32(k0 + 16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
r3 += 2 * 4;
r4 += 2 * 4;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#if defined(__AVX2__) && defined (__SSE2__)
#define USEAVX2
#endif
#if defined(__SSE2__)
#define USESSE2
#endif
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#endif
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
/* Report whether this translation unit was compiled with SSE2 support.
 * Returns 1 when the SSE2 code paths are available, 0 otherwise. */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
    const int compiled_with_sse2 = 1;
#else
    const int compiled_with_sse2 = 0;
#endif
    return compiled_with_sse2;
}
/* Report whether this translation unit was compiled with AVX2 support.
 * Returns 1 when the AVX2 code paths are available, 0 otherwise. */
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
    const int compiled_with_avx2 = 1;
#else
    const int compiled_with_avx2 = 0;
#endif
    return compiled_with_avx2;
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
/* Transpose the 8x8 bit matrix packed into the single quadword *x*,
 * exchanging bit (i, j) with bit (j, i) about the main diagonal.
 * Three delta-swap rounds exchange sub-blocks of 1, 2 and 4 bits
 * (the classic Hacker's Delight bit-matrix transpose). *t* is workspace.
 * Statement macro so it can operate directly on uint64_t lvalues in the
 * hot loops below without call overhead. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
/* Transpose the 8x8 bit matrix packed into *x* along the anti-diagonal,
 * from upper right to lower left. Used on big-endian hosts, where the
 * byte order within the quadword reverses the row order of the matrix
 * relative to the little-endian case above. *t* is workspace. */
#define TRANS_BIT_8X8_BE(x, t) { \
t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
x = x ^ t ^ (t << 9); \
t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
x = x ^ t ^ (t << 18); \
t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
x = x ^ t ^ (t << 36); \
}
/* Transpose an lda x ldb matrix of elements of type *type_t*:
 * in[i*ldb + j] -> out[j*lda + i].
 * The first loop nest handles rows in groups of 8 (better read locality);
 * the second handles the lda % 8 leftover rows one at a time.
 * A macro rather than a function so it can be instantiated for
 * int16_t / int32_t / int64_t element views (see the SSE dispatcher). */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
/* Plain memory copy with the bshuf call signature.
 * Exists for testing and profiling baselines; copies size * elem_size
 * bytes from *in* to *out* and returns that byte count. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    const size_t nbyte = size * elem_size;
    memcpy(out, in, nbyte);
    return (int64_t) nbyte;
}
/* Transpose bytes within elements, beginning at element index *start*.
 * Layout change: in[i*elem_size + j] -> out[j*size + i].
 * *start* must be a multiple of 8 (returns -80 otherwise).
 * Returns size * elem_size on success. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
        const size_t elem_size, const size_t start) {
    const char* src = (const char*) in;
    char* dst = (char*) out;
    size_t elem, byte, sub;

    if (start % 8) return -80;

    if (size > start) {
        /* Main body: process eight elements per outer iteration. */
        for (elem = start; elem + 7 < size; elem += 8) {
            for (byte = 0; byte < elem_size; byte++) {
                for (sub = 0; sub < 8; sub++) {
                    dst[byte * size + elem + sub] =
                            src[(elem + sub) * elem_size + byte];
                }
            }
        }
        /* Tail: fewer than eight elements remain. */
        for (elem = size - size % 8; elem < size; elem++) {
            for (byte = 0; byte < elem_size; byte++) {
                dst[byte * size + elem] = src[elem * elem_size + byte];
            }
        }
    }
    return size * elem_size;
}
/* Transpose bytes within elements over the whole buffer (scalar path). */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* The full-buffer case is the remainder routine started at element 0. */
    const size_t start_elem = 0;
    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, start_elem);
}
/* Transpose bits within bytes, starting at byte offset *start_byte*.
 * Treats the nbyte input bytes as (nbyte/8) 8x8 bit tiles; for each tile,
 * bit k of every byte is gathered into output bit-row k (rows are
 * nbyte/8 bytes long). nbyte and start_byte must both be multiples of 8
 * (returns -80 otherwise). Returns size * elem_size on success. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start_byte) {
const uint64_t* in_b = (const uint64_t*) in;
uint8_t* out_b = (uint8_t*) out;
uint64_t x, t;
size_t ii, kk;
size_t nbyte = elem_size * size;
size_t nbyte_bitrow = nbyte / 8;
/* Runtime endianness probe: look at the first byte of a 64-bit 1. */
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
/* On big-endian hosts the bit rows come out in reverse order, so walk
 * the output rows backwards (negative stride via size_t wraparound). */
const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;
CHECK_MULT_EIGHT(nbyte);
CHECK_MULT_EIGHT(start_byte);
for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
x = in_b[ii];
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
/* Big-endian byte order flips the tile; transpose about the
 * anti-diagonal instead. */
TRANS_BIT_8X8_BE(x, t);
}
/* Emit one byte of the transposed word into each of the 8 bit rows. */
for (kk = 0; kk < 8; kk ++) {
out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
x = x >> 8;
}
}
return size * elem_size;
}
/* Transpose bits within bytes over the whole buffer (scalar path). */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* The full-buffer case is the remainder routine started at byte 0. */
    const size_t start_byte = 0;
    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, start_byte);
}
/* General transpose of an lda x ldb matrix of elem_size-byte elements:
 * in[i][j] -> out[j][i]. Optimized for large element sizes, where the
 * per-element memcpy cost is amortized. Returns lda * ldb * elem_size. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
        const size_t ldb, const size_t elem_size) {
    const char* src = (const char*) in;
    char* dst = (char*) out;
    size_t row, col;
    for (row = 0; row < lda; row++) {
        /* Hoist the row base address out of the inner loop. */
        const char* src_row = src + row * ldb * elem_size;
        for (col = 0; col < ldb; col++) {
            memcpy(dst + (col * lda + row) * elem_size,
                   src_row + col * elem_size, elem_size);
        }
    }
    return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (each size / 8 bytes long) within
 * groups of 8 rows. size must be a multiple of 8 (returns -80 otherwise). */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    const size_t nbyte_bitrow = size / 8;
    if (size % 8) return -80;
    /* An 8 x elem_size matrix of bit rows; reuse the general transpose. */
    return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements (scalar path).
 * Pipeline: byte-within-element transpose, then bit-within-byte
 * transpose, then regroup the bit rows. Uses a scratch buffer of
 * size * elem_size bytes. Returns -1 on allocation failure, -80 if
 * size is not a multiple of 8, otherwise bytes processed. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bit_byte_scal(out, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
/* For data organized into one row per bit plane (8 * elem_size rows of
 * size / 8 bytes each), transpose the byte layout so the 8 bit-row bytes
 * of each output position are contiguous. size must be a multiple of 8
 * (returns -80 otherwise). Returns size * elem_size on success. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    const char* src = (const char*) in;
    char* dst = (char*) out;
    const size_t nbyte_row = size / 8;
    size_t row, col, bit;

    if (size % 8) return -80;

    for (col = 0; col < elem_size; col++) {
        for (row = 0; row < nbyte_row; row++) {
            for (bit = 0; bit < 8; bit++) {
                dst[row * 8 * elem_size + col * 8 + bit] =
                        src[(col * 8 + bit) * nbyte_row + row];
            }
        }
    }
    return size * elem_size;
}
/* Shuffle bits within the bytes of eight-element blocks (scalar path).
 * Used during unshuffling: for every block of 8 elements it re-transposes
 * each 8x8 bit tile back into element order. size must be a multiple of 8
 * (returns -80 otherwise). Returns size * elem_size on success. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
const size_t size, const size_t elem_size) {
const char *in_b;
char *out_b;
uint64_t x, t;
size_t ii, jj, kk;
size_t nbyte, out_index;
/* Runtime endianness probe; on big-endian hosts the output bytes are
 * written in reverse element order (negative stride via wraparound). */
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
const size_t elem_skip = little_endian ? elem_size : -elem_size;
const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;
CHECK_MULT_EIGHT(size);
in_b = (const char*) in;
out_b = (char*) out;
nbyte = elem_size * size;
/* jj walks 8-byte groups of bit rows; ii walks eight-element blocks. */
for (jj = 0; jj < 8 * elem_size; jj += 8) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
x = *((uint64_t*) &in_b[ii + jj]);
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
/* Scatter the 8 transposed bytes, one per element in the block. */
for (kk = 0; kk < 8; kk++) {
out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
*((uint8_t*) &out_b[out_index]) = x;
x = x >> 8;
}
}
}
return size * elem_size;
}
/* Untranspose (inverse bitshuffle) bits within elements, scalar path.
 * Two stages through a scratch buffer: regroup bit-row bytes, then
 * re-transpose each 8x8 bit tile. Returns -1 on allocation failure,
 * -80 if size is not a multiple of 8, otherwise bytes processed. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_bitrow_scal(in, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_shuffle_bit_eightelem_scal(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit (2 byte) elements, SSE2.
 * Processes 16 elements (32 bytes) per iteration: the chain of
 * unpacklo/unpackhi byte interleaves is a perfect-shuffle network that
 * ends with all low bytes in a0 and all high bytes in b0. Elements that
 * do not fill a full vector are handled by the scalar remainder routine. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
/* Row k of the output holds byte k of every element. */
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit (4 byte) elements, SSE2.
 * Processes 16 elements (64 bytes) per iteration: three rounds of byte
 * interleaves followed by a 64-bit interleave leave byte plane k of all
 * 16 elements in register k (a0..d0). The tail goes through the scalar
 * remainder routine. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
/* Final 64-bit interleave gathers each byte plane into one register. */
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit (8 byte) elements, SSE2.
 * Processes 16 elements (128 bytes) per iteration: two rounds of byte
 * interleaves, one 32-bit round and one 64-bit round leave byte plane k
 * of all 16 elements in register k (a0..h0). The tail goes through the
 * scalar remainder routine. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
/* Round 1: interleave bytes of register pairs. */
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
/* Round 2: interleave bytes again. */
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
/* Round 3: interleave 32-bit lanes. */
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
/* Round 4: interleave 64-bit lanes; each result is one byte plane. */
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using the best SSE kernel available.
 * Element sizes 1/2/4/8 dispatch to the specialized kernels; sizes not
 * divisible by 4 fall back to the scalar routine. Larger multiples of 4
 * are decomposed into chunks of 8- or 4-byte sub-elements, transposed
 * with the vector kernels, then regrouped. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
switch (elem_size) {
case 1:
/* 1-byte elements: the transpose is the identity, just copy. */
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
/* Not a multiple of 4: the vector decomposition below does not apply. */
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
/* Unreachable as written: elem_size % 4 != 0 already returned via
 * the scalar path above. Kept for structural safety. */
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes using SSE2.
 * For each 16-byte vector, movemask extracts the MSB of every byte to
 * form one 16-bit output word per bit plane; shifting left by one brings
 * up the next plane. nbyte must be a multiple of 8 (returns -80
 * otherwise); the last nbyte % 16 bytes go through the scalar remainder
 * routine. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
/* Bit plane 7-kk is the kk-th mask extracted (MSB first). */
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements using the SSE2 kernels.
 * Same three-stage pipeline as the scalar version, with vectorized
 * stages. Returns -1 on allocation failure, -80 if size is not a
 * multiple of 8, otherwise bytes processed. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bit_byte_SSE(out, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
/* For data organized into a row for each bit (8 * elem_size rows of
 * size / 8 bytes), transpose the bytes using SSE2. Works on 8-row by
 * 16-column byte tiles: three rounds of interleaves build the transposed
 * tile, then storel/storeh write out 8-byte column halves. Rows/columns
 * that do not fill a tile are handled by the scalar loop at the end.
 * size must be a multiple of 8 (returns -80 otherwise). */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
/* Round 1: byte interleaves of row pairs. */
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
/* Round 2: 16-bit interleaves. */
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
/* Round 3: 32-bit interleaves; each register now holds two output
 * columns of 8 bytes each. */
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
/* Float views of the integer registers so _mm_storel_pi /
 * _mm_storeh_pi can write the 8-byte halves independently. */
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
/* Scalar tail for the nbyte_row % 16 columns of this row group. */
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight-element blocks (SSE2 path).
 * For odd element sizes the 16-byte vector path does not apply, so fall
 * back to the scalar implementation. size must be a multiple of 8
 * (returns -80 otherwise); returns size * elem_size on success.
 *
 * Fix: the scalar fallback's return value was previously discarded
 * (silently swallowing any error code); it is now propagated, matching
 * the AVX2 implementation of this routine. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    CHECK_MULT_EIGHT(size);
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;
    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;
    __m128i xmm;
    int32_t bt;
    if (elem_size % 2) {
        /* Propagate the fallback's result instead of dropping it. */
        return bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    }
    for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
            ii += 8 * elem_size) {
        for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
            xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
            /* Peel off one bit plane per iteration: movemask gathers the
             * MSB of each of the 16 bytes; the shift exposes the next. */
            for (kk = 0; kk < 8; kk++) {
                bt = _mm_movemask_epi8(xmm);
                xmm = _mm_slli_epi16(xmm, 1);
                size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                out_ui16[ind / 2] = bt;
            }
        }
    }
    return size * elem_size;
}
/* Untranspose (inverse bitshuffle) bits within elements using SSE2.
 * Regroups bit-row bytes through a scratch buffer, then re-transposes
 * each 8x8 bit tile. Returns -1 on allocation failure, -80 if size is
 * not a multiple of 8, otherwise bytes processed. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_bitrow_SSE(in, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_shuffle_bit_eightelem_SSE(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
#else
/* Stub implementations used when this translation unit is compiled
 * without SSE2 support. Each returns the error code -11, which callers
 * interpret as "SSE2 code paths unavailable". */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif
/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */
/* ---- Worker code that uses AVX2 ----
*
* The following code makes use of the AVX2 instruction set and specialized
* 32 byte registers. The AVX2 instructions are present on newer x86
* processors. The first Intel processor microarchitecture supporting AVX2 was
* Haswell (2013).
*
*/
#ifdef USEAVX2
/* Transpose bits within bytes using AVX2.
 * Same scheme as the SSE2 version but 32 bytes at a time: movemask
 * extracts the MSB of every byte into a 32-bit word per bit plane, and a
 * left shift exposes the next plane. The last nbyte % 32 bytes go
 * through the scalar remainder routine. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
/* Bit plane 7-kk is the kk-th mask extracted (MSB first). */
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements using the AVX2 kernels.
 * Byte-within-element stage still uses the SSE kernels; the
 * bit-within-byte stage is AVX2. Returns -1 on allocation failure,
 * -80 if size is not a multiple of 8, otherwise bytes processed. */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bit_byte_AVX(out, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
/* For data organized into a row for each bit (8 * elem_size rows of
 * size / 8 bytes), transpose the bytes using AVX2. Processes 32-byte
 * column strips of 32 rows (4 sub-tiles of 8 rows), interleaving at 8-,
 * 16-, 32- and 64-bit granularity and finishing with cross-lane 128-bit
 * permutes. Element sizes not divisible by 4 are delegated to the SSE
 * version; leftover columns use the scalar loop at the end.
 * size must be a multiple of 8 (returns -80 otherwise). */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
/* Holds the four partially-transposed 8-row sub-tiles of a strip. */
__m256i ymm_storeage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
/* Load 8 consecutive bit rows of this sub-tile. */
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
/* Byte interleave of row pairs. */
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
/* 16-bit interleave. */
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
/* 32-bit interleave. */
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
ymm_storeage[kk][hh] = ymm_1[kk];
}
}
/* Combine the four sub-tiles: 64-bit interleaves then 128-bit
 * cross-lane permutes (imm 32 = low halves, 49 = high halves). */
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
ymm_0[kk] = ymm_storeage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
/* Scalar tail for the nbyte_row % 32 leftover columns. */
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight-element blocks (AVX2 path).
 * Element sizes not divisible by 4 are delegated to the SSE version.
 * For each 32-byte vector, movemask peels off one bit plane per
 * iteration (MSB first). size must be a multiple of 8 (returns -80
 * otherwise); returns size * elem_size on success. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose (inverse bitshuffle) bits within elements using AVX2.
 * Regroups bit-row bytes through a scratch buffer, then re-transposes
 * each 8x8 bit tile. Returns -1 on allocation failure, -80 if size is
 * not a multiple of 8, otherwise bytes processed. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    void* scratch;
    int64_t ret;

    CHECK_MULT_EIGHT(size);

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    ret = bshuf_trans_byte_bitrow_AVX(in, scratch, size, elem_size);
    CHECK_ERR_FREE(ret, scratch);
    ret = bshuf_shuffle_bit_eightelem_AVX(scratch, out, size, elem_size);

    free(scratch);
    return ret;
}
#else
/* Stub implementations used when this translation unit is compiled
 * without AVX2 support. Each returns the error code -12, which callers
 * interpret as "AVX2 code paths unavailable". */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif
/* ---- Drivers selecting best instruction set at compile time. ---- */
/* Transpose bits within elements using the best implementation compiled
 * in: AVX2, else SSE2, else scalar. */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {
#ifdef USEAVX2
    return bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    return bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#else
    return bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
}
/* Untranspose bits within elements using the best implementation
 * compiled in: AVX2, else SSE2, else scalar. */
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {
#ifdef USEAVX2
    return bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    return bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#else
    return bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a per-block function *fun* to process an entire buffer, optionally
 * in parallel with OpenMP. The ioc_chain *C* hands each call its input
 * and output cursors (see bitshuffle_internals.h). block_size == 0
 * selects the default for this element size; block sizes not a multiple
 * of BSHUF_BLOCKED_MULT return -81. Trailing bytes that do not fill a
 * BSHUF_BLOCKED_MULT group are copied verbatim. Returns the cumulative
 * byte count plus leftovers, or the first negative error code seen.
 * NOTE(review): under OpenMP, *err* is written by multiple threads
 * without atomic/reduction protection (last writer wins) — confirm this
 * is the intended best-effort error reporting. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
size_t ii;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < size / block_size; ii ++) {
count = fun(&C, block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
/* Partial final block, truncated to a multiple of BSHUF_BLOCKED_MULT. */
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
/* Remaining elements are too few to shuffle; copy them through. */
leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
/* Bitshuffle a single block. */
/* Bitshuffle one block pulled from the I/O chain.
 *
 * Claims the next input and output slots from *C_ptr, advances both chain
 * cursors by size * elem_size bytes, and bit-transposes the block.
 * Returns the count from bshuf_trans_bit_elem (negative on error). */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr,
        const size_t size, const size_t elem_size) {
    size_t iter;
    const size_t nbytes = size * elem_size;
    const void *src;
    void *dst;

    src = ioc_get_in(C_ptr, &iter);
    ioc_set_next_in(C_ptr, &iter, (void*) ((char*) src + nbytes));
    dst = ioc_get_out(C_ptr, &iter);
    ioc_set_next_out(C_ptr, &iter, (void*) ((char*) dst + nbytes));

    return bshuf_trans_bit_elem(src, dst, size, elem_size);
}
/* Bitunshuffle a single block. */
/* Bitunshuffle one block pulled from the I/O chain.
 *
 * Claims the next input and output slots from *C_ptr, advances both chain
 * cursors by size * elem_size bytes, and applies the inverse bit
 * transposition. Returns the count from bshuf_untrans_bit_elem (negative
 * on error). */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr,
        const size_t size, const size_t elem_size) {
    size_t iter;
    const size_t nbytes = size * elem_size;
    const void *src;
    void *dst;

    src = ioc_get_in(C_ptr, &iter);
    ioc_set_next_in(C_ptr, &iter, (void*) ((char*) src + nbytes));
    dst = ioc_get_out(C_ptr, &iter);
    ioc_set_next_out(C_ptr, &iter, (void*) ((char*) dst + nbytes));

    return bshuf_untrans_bit_elem(src, dst, size, elem_size);
}
/* Write a 64 bit unsigned integer to a buffer in big endian order. */
/* Serialize `num` into 8 bytes at `buf`, most significant byte first
 * (big endian), independent of host byte order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int pos = 0;
    int shift;
    for (shift = 56; shift >= 0; shift -= 8) {
        bytes[pos++] = (uint8_t) (num >> shift);
    }
}
/* Read a 64 bit unsigned integer from a buffer big endian order. */
/* Deserialize 8 bytes at `buf` as a big-endian unsigned 64 bit integer
 * (byte 0 is the most significant), independent of host byte order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    uint8_t* bytes = (uint8_t*) buf;
    uint64_t num = 0;
    int pos;
    for (pos = 0; pos < 8; pos++) {
        num = (num << 8) | bytes[pos];
    }
    return num;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
/* Serialize `num` into 4 bytes at `buf`, most significant byte first
 * (big endian), independent of host byte order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int pos = 0;
    int shift;
    for (shift = 24; shift >= 0; shift -= 8) {
        bytes[pos++] = (uint8_t) (num >> shift);
    }
}
/* Read a 32 bit unsigned integer from a buffer big endian order. */
/* Deserialize 4 bytes at `buf` as a big-endian unsigned 32 bit integer
 * (byte 0 is the most significant), independent of host byte order.
 *
 * Fixed: the local byte pointer now preserves the const qualifier of the
 * `const void* buf` parameter instead of casting it away. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
    const uint8_t* b = (const uint8_t*) buf;
    uint32_t num = 0;
    int ii;
    for (ii = 0; ii < 4; ii++) {
        /* Shift previous bytes up and append the next, MSB-first. */
        num = (num << 8) | b[ii];
    }
    return num;
}
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
/* Choose a default block size (in elements) for the given element width:
 * roughly BSHUF_TARGET_BLOCK_SIZE_B bytes per block, rounded down to a
 * multiple of BSHUF_BLOCKED_MULT, and never below
 * BSHUF_MIN_RECOMMEND_BLOCK. */
size_t bshuf_default_block_size(const size_t elem_size) {
    size_t nelem = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
    nelem -= nelem % BSHUF_BLOCKED_MULT;
    if (nelem < BSHUF_MIN_RECOMMEND_BLOCK)
        nelem = BSHUF_MIN_RECOMMEND_BLOCK;
    return nelem;
}
/* Public entry point: bitshuffle `size` elements of `elem_size` bytes from
 * `in` to `out`, processed in blocks of `block_size` elements (0 selects
 * the default block size). Returns bytes processed or a negative error. */
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    return bshuf_blocked_wrap_fun(bshuf_bitshuffle_block, in, out, size,
            elem_size, block_size);
}
/* Public entry point: undo a bitshuffle of `size` elements of `elem_size`
 * bytes from `in` to `out`, using the same `block_size` (0 = default) that
 * was used to shuffle. Returns bytes processed or a negative error. */
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    return bshuf_blocked_wrap_fun(bshuf_bitunshuffle_block, in, out, size,
            elem_size, block_size);
}
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
GB_unaryop__abs_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint32
// op(A') function: GB_tran__abs_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS operator entrywise: Cx [p] = (uint64_t) Ax [p] for all anz
// entries. For an unsigned input ABS is the identity (GB_OP above is
// z = x), so this reduces to a parallel typecast copy.
GrB_Info GB_unop__abs_uint64_uint32
(
    uint64_t *restrict Cx,          // output array, anz entries
    const uint32_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // operator disabled at compile time; caller uses the generic case
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;         // Cx [p] = (uint64_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting uint32_t -> uint64_t
// and applying ABS (identity here). The loop itself lives in the included
// template GB_unaryop_transpose.c, specialized by the GB_* macros above.
GrB_Info GB_tran__abs_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // workspace used by the template -- see
                                    // GB_unaryop_transpose.c for semantics
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    // operator disabled at compile time; caller uses the generic case
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_uint8
// op(A') function: GB_tran__ainv_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) operator entrywise:
// Cx [p] = (bool) Ax [p] for all anz entries. For this type combination
// the generated operator is the identity (GB_OP above is z = x), so this
// reduces to a parallel uint8_t -> bool typecast copy.
GrB_Info GB_unop__ainv_bool_uint8
(
    bool *restrict Cx,              // output array, anz entries
    const uint8_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // operator disabled at compile time; caller uses the generic case
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;         // Cx [p] = (bool) Ax [p]
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting uint8_t -> bool and
// applying AINV (identity here). The loop itself lives in the included
// template GB_unaryop_transpose.c, specialized by the GB_* macros above.
GrB_Info GB_tran__ainv_bool_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // workspace used by the template -- see
                                    // GB_unaryop_transpose.c for semantics
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    // operator disabled at compile time; caller uses the generic case
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
loop-4.c | /* { dg-do run } */
extern void abort (void);
/* OpenMP conformance test: dynamically scheduled loops whose bounds sit
 * right at the edges of the `long` range must still visit exactly the
 * expected iteration values (no overflow when the runtime computes the
 * iteration space). Aborts on any unexpected value. */
int
main (void)
{
  /* Error flag; per-thread copies are combined with + at the join, so any
   * thread seeing a bad value makes the final e nonzero. */
  int e = 0;
#pragma omp parallel num_threads (4) reduction(+:e)
  {
    long i;
    /* Increasing loop just below __LONG_MAX__: only three values valid. */
#pragma omp for schedule(dynamic,1)
    for (i = __LONG_MAX__ - 30001; i <= __LONG_MAX__ - 10001; i += 10000)
      if (i != __LONG_MAX__ - 30001
          && i != __LONG_MAX__ - 20001
          && i != __LONG_MAX__ - 10001)
        e = 1;
    /* Decreasing loop just above -__LONG_MAX__: mirror of the above. */
#pragma omp for schedule(dynamic,1)
    for (i = -__LONG_MAX__ + 30000; i >= -__LONG_MAX__ + 10000; i -= 10000)
      if (i != -__LONG_MAX__ + 30000
          && i != -__LONG_MAX__ + 20000
          && i != -__LONG_MAX__ + 10000)
        e = 1;
  }
  if (e)
    abort ();
  return 0;
}
|
events.h | /*****************************************************************************\
* ANALYSIS PERFORMANCE TOOLS *
* Extrae *
* Instrumentation package for parallel applications *
*****************************************************************************
* ___ This library is free software; you can redistribute it and/or *
* / __ modify it under the terms of the GNU LGPL as published *
* / / _____ by the Free Software Foundation; either version 2.1 *
* / / / \ of the License, or (at your option) any later version. *
* ( ( ( B S C ) *
* \ \ \_____/ This library is distributed in hope that it will be *
* \ \__ useful but WITHOUT ANY WARRANTY; without even the *
* \___ implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU LGPL for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
 * The GNU Lesser General Public License is contained in the file COPYING. *
* --------- *
* Barcelona Supercomputing Center - Centro Nacional de Supercomputacion *
\*****************************************************************************/
#ifndef __EVENTS_H_INCLUDED__
#define __EVENTS_H_INCLUDED__
#include "openshmem_events.h"
#ifdef __cplusplus
extern "C" {
#endif
unsigned IsMPI (unsigned EvType);
unsigned IsOpenMP (unsigned EvType);
unsigned IsMISC (unsigned EvType);
unsigned IsBurst (unsigned EvType);
unsigned IsHwcChange (unsigned EvType);
unsigned IsMPICollective (unsigned EvType);
#ifdef __cplusplus
}
#endif
#define EMPTY ( 0)
#define NO_COUNTER (-1)
#define SAMPLE_COUNTER (-2)
#define MAX_CALLERS 100
/******************************************************************************
* General user events to trace the application.
******************************************************************************/
#define NULL_EV -1
/* Values */
#define EVT_BEGIN 1
#define EVT_END 0
/* Trace options, just do a bitwise or/and with these values */
#define TRACEOPTION_NONE (0)
#define TRACEOPTION_HWC (1<<0)
#define TRACEOPTION_CIRCULAR_BUFFER (1<<1)
/* Useless #define TRACEOPTION_BURSTS (1<<2) */
#define TRACEOPTION_BIGENDIAN (1<<3)
#define TRACEOPTION_PARAVER (1<<4)
#define TRACEOPTION_DIMEMAS (1<<5)
/* These trace options are intended to 'catch' special architectures */
#define TRACEOPTION_UNK_ARCH (1<<10) /* Unknown */
#define TRACEOPTION_MN_ARCH (1<<11) /* MareNostrum */
#define TRACEOPTION_BG_ARCH (1<<12) /* BlueGene / {PL} */
#define SYNCHRONIZATION_POINT_EV 1000
#define OPTIONS_EV 1001
#define SAMPLING_EV 30000000
#define SAMPLING_LINE_EV 30000100
#define SAMPLING_CALLER_OFFSET 100000
#define HWC_SET_OVERFLOW_EV 31000000
#define SAMPLING_ADDRESS_LD_EV 32000000
#define SAMPLING_ADDRESS_ST_EV 32000001
#define SAMPLING_ADDRESS_MEM_LEVEL_EV 32000002
#define SAMPLING_ADDRESS_MEM_HITORMISS_EV 32000003
#define SAMPLING_ADDRESS_TLB_LEVEL_EV 32000004
#define SAMPLING_ADDRESS_TLB_HITORMISS_EV 32000005
#define SAMPLING_ADDRESS_REFERENCE_COST_EV 32000006
#define SAMPLING_ADDRESS_ALLOCATED_OBJECT_EV 32000007
#define SAMPLING_ADDRESS_ALLOCATED_OBJECT_ALLOC_EV 32000009
#define SAMPLING_ADDRESS_STATIC_OBJECT_EV 32000008
#define SAMPLING_ADDRESS_ALLOCATED_OBJECT_CALLER_EV 32000100 /* internal purposes, not emitted into paraver tracefile */
#define SYSCALL_EV 40000000
/* Values emitted with SYSCALL_EV. */
enum {
  SYSCALL_SCHED_YIELD_EV,
  SYSCALL_EVENTS_COUNT          /* Total number of syscall events */
};
#define APPL_EV 40000001
#define TRACE_INIT_EV 40000002
#define FLUSH_EV 40000003
#define READ_EV 40000004
#define IO_EV READ_EV /* Used in merger */
#define WRITE_EV 40000005
#define READ_VAL_EV 1 /* Used in merger, with IO_EV */
#define WRITE_VAL_EV 2 /* Used in merger, with IO_EV */
#define FREAD_VAL_EV 3 /* Used in merger, with IO_EV */
#define FWRITE_VAL_EV 4 /* Used in merger, with IO_EV */
#define PREAD_VAL_EV 5 /* Used in merger, with IO_EV */
#define PWRITE_VAL_EV 6 /* Used in merger, with IO_EV */
#define READV_VAL_EV 7 /* Used in merger, with IO_EV */
#define WRITEV_VAL_EV 8 /* Used in merger, with IO_EV */
#define PREADV_VAL_EV 9 /* Used in merger, with IO_EV */
#define PWRITEV_VAL_EV 10 /* Used in merger, with IO_EV */
#define OPEN_VAL_EV 11 /* Used in merger, with IO_EV */
#define FOPEN_VAL_EV 12 /* Used in merger, with IO_EV */
#define USER_EV 40000006
#define HWC_DEF_EV 40000007
#define HWC_CHANGE_EV 40000008
#define HWC_EV 40000009
#define IO_DESCRIPTOR_EV 40000010
#define IO_SIZE_EV 40000011
#define IO_DESCRIPTOR_TYPE_EV 40000013
#define TRACING_EV 40000012
#define SET_TRACE_EV 40000014
#define CPU_BURST_EV 40000015
#define RUSAGE_EV 40000016
#define MPI_STATS_EV 40000017
#define TRACING_MODE_EV 40000018
#define MEMUSAGE_EV 40000020
#define USER_SEND_EV 40000021
#define USER_RECV_EV 40000022
#define RESUME_VIRTUAL_THREAD_EV 40000023
#define SUSPEND_VIRTUAL_THREAD_EV 40000024
#define REGISTER_STACKED_TYPE_EV 40000025
#define REGISTER_CODELOCATION_TYPE_EV 40000026
#define FORK_EV 40000027
#define FORK_SYSCALL_EV FORK_EV
#define WAIT_EV 40000028
#define WAITPID_EV 40000029
#define WAITEDPID_EV 40000030
#define EXEC_EV 40000031
#define EXEC_BIN_EV 40000032
#define GETCPU_EV 40000033
#define CPU_EVENT_INTERVAL_EV 40000133
#define SYSTEM_EV 40000034
#define SYSTEM_BIN_EV 40000035
#define PID_EV 40000036
#define PPID_EV 40000037
#define FORK_DEPTH_EV 40000038
#define LIBRARY_EV 40000039
#define MALLOC_EV 40000040
#define FREE_EV 40000041
#define CALLOC_EV 40000042
#define REALLOC_EV 40000043
#define POSIX_MEMALIGN_EV 40000044
#define MEMKIND_MALLOC_EV 40000045
#define MEMKIND_CALLOC_EV 40000046
#define MEMKIND_REALLOC_EV 40000047
#define MEMKIND_POSIX_MEMALIGN_EV 40000048
#define MEMKIND_FREE_EV 40000049
#define MEMKIND_PARTITION_EV 40001000
#define KMPC_MALLOC_EV 40000062
#define KMPC_FREE_EV 40000063
#define KMPC_CALLOC_EV 40000064
#define KMPC_REALLOC_EV 40000065
#define KMPC_ALIGNED_MALLOC_EV 40000066
/* Values emitted with MEMKIND_PARTITION_EV: identifies the memkind
 * partition involved in the traced allocation call. */
enum
{
  MEMKIND_PARTITION_DEFAULT_VAL = 1,
  MEMKIND_PARTITION_HBW_VAL,
  MEMKIND_PARTITION_HBW_HUGETLB_VAL,
  MEMKIND_PARTITION_HBW_PREFERRED_VAL,
  MEMKIND_PARTITION_HBW_PREFERRED_HUGETLB_VAL,
  MEMKIND_PARTITION_HUGETLB_VAL,
  MEMKIND_PARTITION_HBW_GBTLB_VAL,
  MEMKIND_PARTITION_HBW_PREFERRED_GBTLB_VAL,
  MEMKIND_PARTITION_GBTLB_VAL,
  MEMKIND_PARTITION_HBW_INTERLEAVE_VAL,
  MEMKIND_PARTITION_INTERLEAVE_VAL,
  MEMKIND_PARTITION_OTHER_VAL,
};
#define DYNAMIC_MEM_EV MALLOC_EV /* Used in merger only */
#define DYNAMIC_MEM_REQUESTED_SIZE_EV DYNAMIC_MEM_EV+1 /* Used in merger only */
#define DYNAMIC_MEM_POINTER_IN_EV DYNAMIC_MEM_EV+2 /* Used in merger only, free input, realloc in */
#define DYNAMIC_MEM_POINTER_OUT_EV DYNAMIC_MEM_EV+3 /* Used in merger only, malloc output, calloc output, realloc out */
#define CLOCK_FROM_SYSTEM_EV 40000050
#define FREAD_EV 40000051
#define FWRITE_EV 40000052
#define PREAD_EV 40000053
#define PWRITE_EV 40000054
#define READV_EV 40000055
#define WRITEV_EV 40000056
#define PREADV_EV 40000057
#define PWRITEV_EV 40000058
#define FILE_NAME_EV 40000059
#define OPEN_EV 40000060
#define FOPEN_EV 40000061
#define ADDRESSES_FOR_BINARY_EV 41000000
#define RUSAGE_BASE 45000000
/* Offsets added to RUSAGE_BASE, one per getrusage(2) field traced. */
enum {
  RUSAGE_UTIME_EV = 0,
  RUSAGE_STIME_EV,
  RUSAGE_MAXRSS_EV,
  RUSAGE_IXRSS_EV,
  RUSAGE_IDRSS_EV,
  RUSAGE_ISRSS_EV,
  RUSAGE_MINFLT_EV,
  RUSAGE_MAJFLT_EV,
  RUSAGE_NSWAP_EV,
  RUSAGE_INBLOCK_EV,
  RUSAGE_OUBLOCK_EV,
  RUSAGE_MSGSND_EV,
  RUSAGE_MSGRCV_EV,
  RUSAGE_NSIGNALS_EV,
  RUSAGE_NVCSW_EV,
  RUSAGE_NIVCSW_EV,
  RUSAGE_EVENTS_COUNT /* Total number of getrusage events */
};
#define MEMUSAGE_BASE 46000000
/* Offsets added to MEMUSAGE_BASE, one per traced memory-usage metric. */
enum {
  MEMUSAGE_ARENA_EV = 0,
  MEMUSAGE_HBLKHD_EV,
  MEMUSAGE_UORDBLKS_EV,
  MEMUSAGE_FORDBLKS_EV,
  MEMUSAGE_INUSE_EV,
  MEMUSAGE_EVENTS_COUNT /* Total number of memusage events */
};
#define JAVA_JVMTI_GARBAGECOLLECTOR_EV 48000001
#define JAVA_JVMTI_EXCEPTION_EV 48000002
#define JAVA_JVMTI_OBJECT_ALLOC_EV 48000003
#define JAVA_JVMTI_OBJECT_FREE_EV 48000004
#define OMP_STATS_BASE 65000000
/* Offsets added to OMP_STATS_BASE for the OpenMP task statistics. */
enum {
  OMP_NUM_TASKS_INSTANTIATED = 0,
  OMP_NUM_TASKS_EXECUTED,
  OMP_STATS_EVENTS_COUNT        /* Total number of OpenMP statistics */
};
#define MPI_STATS_BASE 54000000
/* Offsets added to MPI_STATS_BASE, one per traced MPI statistic. */
enum {
  /* Original stats */
  MPI_STATS_P2P_COUNT_EV = 0,
  MPI_STATS_P2P_BYTES_SENT_EV,
  MPI_STATS_P2P_BYTES_RECV_EV,
  MPI_STATS_GLOBAL_COUNT_EV,
  MPI_STATS_GLOBAL_BYTES_SENT_EV,
  MPI_STATS_GLOBAL_BYTES_RECV_EV,
  MPI_STATS_TIME_IN_MPI_EV,
  /* New stats */
  MPI_STATS_P2P_INCOMING_COUNT_EV,
  MPI_STATS_P2P_OUTGOING_COUNT_EV,
  MPI_STATS_P2P_INCOMING_PARTNERS_COUNT_EV,
  MPI_STATS_P2P_OUTGOING_PARTNERS_COUNT_EV,
  MPI_STATS_TIME_IN_OTHER_EV,
  MPI_STATS_TIME_IN_P2P_EV,
  MPI_STATS_TIME_IN_GLOBAL_EV,
  MPI_STATS_OTHER_COUNT_EV,
  MPI_STATS_EVENTS_COUNT /* Total number of MPI statistics */
};
#define FUNCT_BASE 41000000
#define FUNCT_MAX 1000
#define HWC_BASE 42000000 /* Base for preset PAPI counters */
#define HWC_BASE_NATIVE 42001000 /* Base for native PAPI counters */
#define HWC_DELTA_ABSOLUTE 1000000 /* Add this if using absolute values */
#define HWC_GROUP_ID 41999999 /* Identifier of the active hwc set */
/******************************************************************************
* User events to trace several MPI functions.
* MUST be between 50000001 - 50999999
******************************************************************************/
#define MPI_MIN_EV MPI_INIT_EV
#define MPI_MAX_EV 50999999
#define MPI_INIT_EV 50000001
#define MPI_BSEND_EV 50000002
#define MPI_SSEND_EV 50000003
#define MPI_BARRIER_EV 50000004
#define MPI_BCAST_EV 50000005
#define MPI_SEND_EV 50000018
#define MPI_SENDRECV_EV 50000017
#define MPI_SENDRECV_REPLACE_EV 50000081
#define MPI_RECV_EV 50000019
#define MPI_IBSEND_EV 50000020
#define MPI_ISSEND_EV 50000021
#define MPI_ISEND_EV 50000022
#define MPI_IRECV_EV 50000023
#define MPI_IRCV_EV 50000025
#define MPI_TEST_EV 50000026
#define MPI_TESTALL_EV 50000082
#define MPI_TESTANY_EV 50000083
#define MPI_TESTSOME_EV 50000084
#define MPI_TEST_COUNTER_EV 50000080
#define MPI_WAIT_EV 50000027
#define MPI_CANCEL_EV 50000030
#define MPI_RSEND_EV 50000031
#define MPI_IRSEND_EV 50000032
#define MPI_ALLTOALL_EV 50000033
#define MPI_ALLTOALLV_EV 50000034
#define MPI_ALLREDUCE_EV 50000035
#define MPI_REDUCE_EV 50000038
#define MPI_WAITALL_EV 50000039
#define MPI_WAITANY_EV 50000068
#define MPI_WAITSOME_EV 50000069
#define MPI_IRECVED_EV 50000040
#define MPI_GATHER_EV 50000041
#define MPI_GATHERV_EV 50000042
#define MPI_SCATTER_EV 50000043
#define MPI_SCATTERV_EV 50000044
#define MPI_FINALIZE_EV 50000045
#define MPI_COMM_RANK_EV 50000046
#define MPI_COMM_SIZE_EV 50000047
#define MPI_COMM_CREATE_EV 50000048
#define MPI_COMM_DUP_EV 50000049
#define MPI_COMM_SPLIT_EV 50000050
#define MPI_COMM_SPAWN_EV 50000054
#define MPI_COMM_SPAWN_MULTIPLE_EV 50000055
#define MPI_RANK_CREACIO_COMM_EV 50000051 /* Used to define communicators */
#define MPI_ALIAS_COMM_CREATE_EV 50000061 /* Used to define communicators */
#define MPI_ALLGATHER_EV 50000052
#define MPI_ALLGATHERV_EV 50000053
#define MPI_CART_CREATE_EV 50000058
#define MPI_CART_SUB_EV 50000059
#define MPI_CART_COORDS_EV 50000060
#define MPI_REDUCESCAT_EV 50000062
#define MPI_SCAN_EV 50000063
#define MPI_PROBE_EV 50000065
#define MPI_IPROBE_EV 50000066
#define MPI_COMM_FREE_EV 50000067
#define MPI_PERSIST_REQ_EV 50000070
#define MPI_START_EV 50000071
#define MPI_STARTALL_EV 50000072
#define MPI_REQUEST_FREE_EV 50000073
#define MPI_RECV_INIT_EV 50000074
#define MPI_SEND_INIT_EV 50000075
#define MPI_BSEND_INIT_EV 50000076
#define MPI_RSEND_INIT_EV 50000077
#define MPI_SSEND_INIT_EV 50000078
#define MPI_REQUEST_GET_STATUS_EV 50000079
#define MPI_INTERCOMM_CREATE_EV 50000085
#define MPI_INTERCOMM_MERGE_EV 50000086
#define MPI_GLOBAL_OP_SENDSIZE (MPI_INIT_EV+100000)
#define MPI_GLOBAL_OP_RECVSIZE (MPI_INIT_EV+100001)
#define MPI_GLOBAL_OP_ROOT (MPI_INIT_EV+100002)
#define MPI_GLOBAL_OP_COMM (MPI_INIT_EV+100003)
#define MPI_FILE_OPEN_EV 50000100
#define MPI_FILE_CLOSE_EV 50000101
#define MPI_FILE_READ_EV 50000102
#define MPI_FILE_READ_ALL_EV 50000103
#define MPI_FILE_WRITE_EV 50000104
#define MPI_FILE_WRITE_ALL_EV 50000105
#define MPI_FILE_READ_AT_EV 50000106
#define MPI_FILE_READ_AT_ALL_EV 50000107
#define MPI_FILE_WRITE_AT_EV 50000108
#define MPI_FILE_WRITE_AT_ALL_EV 50000109
#define MPI_IO_SIZE_EV 50000110
#define MPI_GET_EV 50000200
#define MPI_PUT_EV 50000201
#define MPI_WIN_CREATE_EV 50000202
#define MPI_WIN_FENCE_EV 50000203
#define MPI_WIN_START_EV 50000204
#define MPI_WIN_FREE_EV 50000205
#define MPI_WIN_POST_EV 50000206
#define MPI_WIN_COMPLETE_EV 50000207
#define MPI_WIN_WAIT_EV 50000208
#define MPI_RMA_SIZE 50001000
#define MPI_RMA_TARGET_RANK 50001001
#define MPI_RMA_ORIGIN_ADDR 50001002
#define MPI_RMA_TARGET_DISP 50001003
#define MPI_IREDUCE_EV 50000210
#define MPI_IALLREDUCE_EV 50000211
#define MPI_IBARRIER_EV 50000212
#define MPI_IBCAST_EV 50000213
#define MPI_IALLTOALL_EV 50000214
#define MPI_IALLTOALLV_EV 50000215
#define MPI_IALLGATHER_EV 50000216
#define MPI_IALLGATHERV_EV 50000217
#define MPI_IGATHER_EV 50000218
#define MPI_IGATHERV_EV 50000219
#define MPI_ISCATTER_EV 50000220
#define MPI_ISCATTERV_EV 50000221
#define MPI_IREDUCESCAT_EV 50000222
#define MPI_ISCAN_EV 50000223
#define MPI_REDUCE_SCATTER_BLOCK_EV 50000224
#define MPI_IREDUCE_SCATTER_BLOCK_EV 50000225
#define MPI_ALLTOALLW_EV 50000226
#define MPI_IALLTOALLW_EV 50000227
#define MPI_WIN_LOCK_EV 50000228
#define MPI_WIN_UNLOCK_EV 50000229
#define MPI_GET_ACCUMULATE_EV 50000230
#define MPI_IPROBE_COUNTER_EV 50000300
#define MPI_TIME_OUTSIDE_IPROBES_EV 50000301
#define MPI_REQUEST_GET_STATUS_COUNTER_EV 50000302
#define MPI_TIME_OUTSIDE_MPI_REQUEST_GET_STATUS_EV 50000303
#define MPI_TIME_OUTSIDE_TESTS_EV 50000304
/******************************************************************************
* User events for BG PERSONALITY
******************************************************************************/
#define BG_PERSONALITY_PROCESSOR_ID 6000
#define BG_PERSONALITY_TORUS_A 6001 /* For BG/Q */
#define BG_PERSONALITY_TORUS_B 6002 /* For BG/Q */
#define BG_PERSONALITY_TORUS_C 6003 /* For BG/Q */
#define BG_PERSONALITY_TORUS_D 6004 /* For BG/Q */
#define BG_PERSONALITY_TORUS_E 6005 /* For BG/Q */
#define BG_PERSONALITY_TORUS_X 6001 /* For BG/L & BG/P */
#define BG_PERSONALITY_TORUS_Y 6002 /* For BG/L & BG/P */
#define BG_PERSONALITY_TORUS_Z 6003 /* For BG/L & BG/P */
/******************************************************************************
* User events to trace MN topology (grodrigu)
******************************************************************************/
#define MN_LINEAR_HOST_EVENT 3000
#define MN_LINECARD_EVENT 3001
#define MN_HOST_EVENT 3002
/******************************************************************************
* User events to trace OMP parallel execution.
******************************************************************************/
#define PAR_EV 60000001
#define WSH_EV 60000002
#define BLOCK_EV 60000003
#define WWORK_EV 60000004
#define BARRIEROMP_EV 60000005
#define NAMEDCRIT_EV 60000006
#define UNNAMEDCRIT_EV 60000007
#define INTLOCK_EV 60000008
#define OMPLOCK_EV 60000009
#define OVHD_EV 60000010
#define WORK_EV 60000011
#define ENTERGATE_EV 60000012
#define EXITGATE_EV 60000013
#define ORDBEGIN_EV 60000014 /* Not used, actually replaced by ORDERED_EV */
#define ORDEND_EV 60000015 /* Not used, actually replaced by ORDERED_EV */
#define JOIN_EV 60000016
#define DESCMARK_EV 60000017
#define OMPFUNC_EV 60000018
#define OMPFUNC_LINE_EV 60000118
#define USRFUNC_EV 60000019
#define USRFUNC_LINE_EV 60000119
#define USRFUNC_EV_BB 60000219
#define TASK_EV 60000021
#define TASKWAIT_EV 60000022
#define TASKFUNC_EV 60000023
#define TASKFUNC_LINE_EV 60000123
#define TASKFUNC_INST_EV 60000024 /* Task func instantiation */
#define TASKFUNC_INST_LINE_EV 60000124 /* at #pragma omp task */
#define TASKGROUP_START_EV 60000025
#define TASKGROUP_END_EV 60000026
#define TASKGROUP_INGROUP_DEEP_EV 60000027
#define TASKID_EV 60000028
#define TASKLOOP_EV 60000029
#define TASKLOOPID_EV 60010029
#define OMPSETNUMTHREADS_EV 60000030
#define OMPGETNUMTHREADS_EV 60000031
#define NAMEDCRIT_NAME_EV 60000032 /* Critical address name */
#define ORDERED_EV 60000033 /* Ordered section in ordered or doacross loop */
#define OMPT_CRITICAL_EV 60000050
#define OMPT_ATOMIC_EV 60000051
#define OMPT_LOOP_EV 60000052
#define OMPT_WORKSHARE_EV 60000053
#define OMPT_SECTIONS_EV 60000054
#define OMPT_SINGLE_EV 60000055
#define OMPT_MASTER_EV 60000056
#define OMPT_TASKGROUP_IN_EV 60000057
#define OMPT_DEPENDENCE_EV 60000058
#define OMPT_TASKFUNC_EV 60000059
#define OMP_STATS_EV 60000060
/******************************************************************************
* User events to trace Pthread parallel execution.
******************************************************************************/
#define PTHREAD_BASE_EV 61000000
#define PTHREAD_EXIT_EV 61000001
#define PTHREAD_CREATE_EV 61000002
#define PTHREAD_JOIN_EV 61000003
#define PTHREAD_DETACH_EV 61000004
#define PTHREAD_RWLOCK_WR_EV 61000005
#define PTHREAD_RWLOCK_RD_EV 61000006
#define PTHREAD_RWLOCK_UNLOCK_EV 61000007
#define PTHREAD_MUTEX_LOCK_EV 61000008
#define PTHREAD_MUTEX_UNLOCK_EV 61000009
#define PTHREAD_COND_SIGNAL_EV 61000010
#define PTHREAD_COND_BROADCAST_EV 61000011
#define PTHREAD_COND_WAIT_EV 61000012
#define PTHREAD_BARRIER_WAIT_EV 61000013
#define PTHREAD_FUNC_EV 60000020
#define PTHREAD_FUNC_LINE_EV 60000120
#define CUDACALL_EV 63000001
#define CUDAMEMCPY_SIZE_EV 63000002
#define CUDAFUNC_EV 63000019
#define CUDAFUNC_LINE_EV 63000119
#define CUDABASE_EV 63100000
#define CUDALAUNCH_EV 63100001
#define CUDACONFIGCALL_EV 63100002
#define CUDAMEMCPY_EV 63100003
#define CUDATHREADBARRIER_EV 63100004
#define CUDASTREAMBARRIER_EV 63100005
#define CUDASTREAMCREATE_EV 63100006
#define CUDAMEMCPYASYNC_EV 63100007
#define CUDADEVICERESET_EV 63100008
#define CUDATHREADEXIT_EV 63100009
#define CUDABASE_GPU_EV 63200000
#define CUDAKERNEL_GPU_EV 63200001
#define CUDACONFIGKERNEL_GPU_EV 63200002
#define CUDAMEMCPY_GPU_EV 63200003
#define CUDATHREADBARRIER_GPU_EV 63200004
#define CUDAMEMCPYASYNC_GPU_EV 63200007
/* To associate stream with Paraver thread */
#define CUDASTREAMBARRIER_THID_EV 63300000
#define OPENCL_KERNEL_NAME_EV 64200000
#define OPENCL_BASE_TYPE_EV 64000000
#define OPENCL_BASE_TYPE_ACC_EV 64100000
#define OPENCL_CLCREATEBUFFER_EV 64000001
#define OPENCL_CLCREATECOMMANDQUEUE_EV 64000002
#define OPENCL_CLCREATECONTEXT_EV 64000003
#define OPENCL_CLCREATECONTEXTFROMTYPE_EV 64000004
#define OPENCL_CLCREATESUBBUFFER_EV 64000005
#define OPENCL_CLCREATEKERNEL_EV 64000006
#define OPENCL_CLCREATEKERNELSINPROGRAM_EV 64000007
#define OPENCL_CLSETKERNELARG_EV 64000008
#define OPENCL_CLCREATEPROGRAMWITHSOURCE_EV 64000009
#define OPENCL_CLCREATEPROGRAMWITHBINARY_EV 64000010
#define OPENCL_CLCREATEPROGRAMWITHBUILTINKERNELS_EV 64000011
#define OPENCL_CLENQUEUEFILLBUFFER_EV 64000012
#define OPENCL_CLENQUEUECOPYBUFFER_EV 64000013
#define OPENCL_CLENQUEUECOPYBUFFERRECT_EV 64000014
#define OPENCL_CLENQUEUENDRANGEKERNEL_EV 64000015
#define OPENCL_CLENQUEUETASK_EV 64000016
#define OPENCL_CLENQUEUENATIVEKERNEL_EV 64000017
#define OPENCL_CLENQUEUEREADBUFFER_EV 64000018
#define OPENCL_CLENQUEUEREADBUFFERRECT_EV 64000019
#define OPENCL_CLENQUEUEWRITEBUFFER_EV 64000020
#define OPENCL_CLENQUEUEWRITEBUFFERRECT_EV 64000021
#define OPENCL_CLBUILDPROGRAM_EV 64000022
#define OPENCL_CLCOMPILEPROGRAM_EV 64000023
#define OPENCL_CLLINKPROGRAM_EV 64000024
#define OPENCL_CLFINISH_EV 64000025
#define OPENCL_CLFLUSH_EV 64000026
#define OPENCL_CLWAITFOREVENTS_EV 64000027
#define OPENCL_CLENQUEUEMARKERWITHWAITLIST_EV 64000028
#define OPENCL_CLENQUEUEBARRIERWITHWAITLIST_EV 64000029
#define OPENCL_CLENQUEUEMAPBUFFER_EV 64000030
#define OPENCL_CLENQUEUEUNMAPMEMOBJECT_EV 64000031
#define OPENCL_CLENQUEUEMIGRATEMEMOBJECTS_EV 64000032
#define OPENCL_CLENQUEUEMARKER_EV 64000033
#define OPENCL_CLENQUEUEBARRIER_EV 64000034
#define OPENCL_CLRETAINCOMMANDQUEUE_EV 64000035
#define OPENCL_CLRELEASECOMMANDQUEUE_EV 64000036
#define OPENCL_CLRETAINCONTEXT_EV 64000037
#define OPENCL_CLRELEASECONTEXT_EV 64000038
#define OPENCL_CLRETAINDEVICE_EV 64000039
#define OPENCL_CLRELEASEDEVICE_EV 64000040
#define OPENCL_CLRETAINEVENT_EV 64000041
#define OPENCL_CLRELEASEEVENT_EV 64000042
#define OPENCL_CLRETAINKERNEL_EV 64000043
#define OPENCL_CLRELEASEKERNEL_EV 64000044
#define OPENCL_CLRETAINMEMOBJECT_EV 64000045
#define OPENCL_CLRELEASEMEMOBJECT_EV 64000046
#define OPENCL_CLRETAINPROGRAM_EV 64000047
#define OPENCL_CLRELEASEPROGRAM_EV 64000048
/* Added to complement CUDA memcpyasync */
#define OPENCL_CLENQUEUEREADBUFFER_ASYNC_EV 64000049
#define OPENCL_CLENQUEUEREADBUFFERRECT_ASYNC_EV 64000050
#define OPENCL_CLENQUEUEWRITEBUFFER_ASYNC_EV 64000051
#define OPENCL_CLENQUEUEWRITEBUFFERRECT_ASYNC_EV 64000052
#define OPENCL_CLMEMOP_SIZE_EV 64099999
/* OpenCL Accelerator side events */
#define OPENCL_CLENQUEUEFILLBUFFER_ACC_EV 64100012
#define OPENCL_CLENQUEUECOPYBUFFER_ACC_EV 64100013
#define OPENCL_CLENQUEUECOPYBUFFERRECT_ACC_EV 64100014
#define OPENCL_CLENQUEUENDRANGEKERNEL_ACC_EV 64100015
#define OPENCL_CLENQUEUETASK_ACC_EV 64100016
#define OPENCL_CLENQUEUENATIVEKERNEL_ACC_EV 64100017
#define OPENCL_CLENQUEUEREADBUFFER_ACC_EV 64100018
#define OPENCL_CLENQUEUEREADBUFFERRECT_ACC_EV 64100019
#define OPENCL_CLENQUEUEWRITEBUFFER_ACC_EV 64100020
#define OPENCL_CLENQUEUEWRITEBUFFERRECT_ACC_EV 64100021
#define OPENCL_CLENQUEUEMARKERWITHWAITLIST_ACC_EV 64100028
#define OPENCL_CLENQUEUEBARRIERWITHWAITLIST_ACC_EV 64100029
#define OPENCL_CLENQUEUEMAPBUFFER_ACC_EV 64100030
#define OPENCL_CLENQUEUEUNMAPMEMOBJECT_ACC_EV 64100031
#define OPENCL_CLENQUEUEMIGRATEMEMOBJECTS_ACC_EV 64100032
#define OPENCL_CLENQUEUEMARKER_ACC_EV 64100033
#define OPENCL_CLENQUEUEBARRIER_ACC_EV 64100034
/* Added to complement CUDA memcpyasync */
#define OPENCL_CLENQUEUEREADBUFFER_ASYNC_ACC_EV 64100049
#define OPENCL_CLENQUEUEREADBUFFERRECT_ASYNC_ACC_EV 64100050
#define OPENCL_CLENQUEUEWRITEBUFFER_ASYNC_ACC_EV 64100051
#define OPENCL_CLENQUEUEWRITEBUFFERRECT_ASYNC_ACC_EV 64100052
/* To associate command queue with Paraver thread */
#define OPENCL_CLFINISH_THID_EV 64300000
#define CALLER_EV 70000000
#define CALLER_LINE_EV 80000000
#define ONLINE_EV 50000
#define CLUSTER_ID_EV 90000001
#define CLUSTER_SUPPORT_EV 92000001
#define SPECTRAL_PERIOD_EV 91000001
#define GREMLIN_EV 93000001
/*
* Values.
*/
#define WORK_WSH_VAL 1
#define WORK_REG_VAL 2
#define WORK_DOSINGLE_VAL 3 /* The thread goes to do the single section */
/*
* Parallelism values.
*/
#define PAR_END_VAL 0 /* Close parallel (region and * worksharing constructs). */
#define PAR_WSH_VAL 1 /* Parallel worksharing constructs : * PARALLEL DO */
#define PAR_SEC_VAL 2 /* Parallel worksharing constructs : * PARALLEL SECTIONS */
#define PAR_REG_VAL 3 /* Parallel region construct : * PARALLEL. */
/*
* Worksharing construct values
*/
#define WSH_END_VAL 0 /* worsharing ending : DO, SINGLE * and SECTIONS */
#define WSH_DO_VAL 4 /* worksharing constructs : DO * and SECTIONS. */
#define WSH_SEC_VAL 5 /* worksharing constructs : DO * and SECTIONS. */
#define WSH_SINGLE_VAL 6 /* worksharing construct : SINGLE */
#define WSH_MASTER_VAL 7 /* worksharing construct : MASTER */
/* Workharing ending values */
#define JOIN_WAIT_VAL 1
#define JOIN_NOWAIT_VAL 2
/*
* Lock Values.
*/
#define UNLOCKED_VAL 0 /* Unlocked Status. Mutex is unlocked. */
#define LOCK_VAL 3 /* Inside an acquire lock function. */
#define UNLOCK_VAL 5 /* Inside a release lock function. */
#define LOCKED_VAL 6 /* Locked Status. Mutex is locked. */
/*
* Ordered Values.
*/
#define OUTORDERED_VAL 0 /* Outside ordered section in ordered or doacross loop. */
#define WAITORDERED_VAL 3 /* Waiting to enter ordered section in ordered or doacross loop */
#define POSTORDERED_VAL 5 /* Signaling the exit from ordered section in ordered or doacross loop */
#define INORDERED_VAL 6 /* Inside ordered section in ordered or doacross loop. */
#if defined(DEAD_CODE)
/*
* Some Ordered Values.
*/
#define IT_MARK_VAL 2
#define WAIT_BEGIN_VAL 3
#define WAIT_END_VAL 4
#endif
#define STATE_ANY -1
#define STATE_IDLE 0
#define STATE_RUNNING 1
#define STATE_STOPPED 2
#define STATE_WAITMESS 3
#define STATE_BLOCKED 9
#define STATE_SYNC 5
#define STATE_BARRIER 5
#define STATE_TWRECV 8
#define STATE_OVHD 7
#define STATE_PROBE 6
#define STATE_BSEND 4
#define STATE_SEND 4
#define STATE_RSEND 4
#define STATE_SSEND 4
#define STATE_IBSEND 10
#define STATE_ISEND 10
#define STATE_IRSEND 10
#define STATE_ISSEND 10
#define STATE_IWAITMESS 11
#define STATE_IRECV 11
#define STATE_IO 12
#define STATE_FLUSH 12
#define STATE_BCAST 13
#define STATE_NOT_TRACING 14
#define STATE_INITFINI 15
#define STATE_OTHERS 15
#define STATE_MIXED 15
#define STATE_SENDRECVOP 16
#define STATE_MEMORY_XFER 17
#define STATE_PROFILING 18
#define STATE_ONLINE_ANALYSIS 19
/* Added for SHMEM */
#define STATE_REMOTE_MEM_ACCESS 20
#define STATE_ATOMIC_MEM_OP 21
#define STATE_MEM_ORDERING 22
#define STATE_LOCKING 23
/* Added for Dimemas */
#define STATE_OVERHEAD 24
#define STATE_1SIDED 25
#define STATE_STARTUP_LATENCY 26
#define STATE_WAIT_LINKS 27
#define STATE_DATA_COPY 28
#define STATE_RTT 29
/* Added for malloc calls */
#define STATE_ALLOCMEM 30
#define STATE_FREEMEM 31
#if defined(DEAD_CODE)
/* ==========================================================================
==== MPI Dimemas Block Numbers
========================================================================== */
typedef enum
{
/* 000 */ BLOCK_ID_NULL,
/* 001 */ BLOCK_ID_MPI_Allgather,
/* 002 */ BLOCK_ID_MPI_Allgatherv,
/* 003 */ BLOCK_ID_MPI_Allreduce,
/* 004 */ BLOCK_ID_MPI_Alltoall,
/* 005 */ BLOCK_ID_MPI_Alltoallv,
/* 006 */ BLOCK_ID_MPI_Barrier,
/* 007 */ BLOCK_ID_MPI_Bcast,
/* 008 */ BLOCK_ID_MPI_Gather,
/* 009 */ BLOCK_ID_MPI_Gatherv,
/* 010 */ BLOCK_ID_MPI_Op_create,
/* 011 */ BLOCK_ID_MPI_Op_free,
/* 012 */ BLOCK_ID_MPI_Reduce_scatter,
/* 013 */ BLOCK_ID_MPI_Reduce,
/* 014 */ BLOCK_ID_MPI_Scan,
/* 015 */ BLOCK_ID_MPI_Scatter,
/* 016 */ BLOCK_ID_MPI_Scatterv,
/* 017 */ BLOCK_ID_MPI_Attr_delete,
/* 018 */ BLOCK_ID_MPI_Attr_get,
/* 019 */ BLOCK_ID_MPI_Attr_put,
/* 020 */ BLOCK_ID_MPI_Comm_create,
/* 021 */ BLOCK_ID_MPI_Comm_dup,
/* 022 */ BLOCK_ID_MPI_Comm_free,
/* 023 */ BLOCK_ID_MPI_Comm_group,
/* 024 */ BLOCK_ID_MPI_Comm_rank,
/* 025 */ BLOCK_ID_MPI_Comm_remote_group,
/* 026 */ BLOCK_ID_MPI_Comm_remote_size,
/* 027 */ BLOCK_ID_MPI_Comm_size,
/* 028 */ BLOCK_ID_MPI_Comm_split,
/* 029 */ BLOCK_ID_MPI_Comm_test_inter,
/* 030 */ BLOCK_ID_MPI_Comm_compare,
/* 031 */ BLOCK_ID_MPI_Group_difference,
/* 032 */ BLOCK_ID_MPI_Group_excl,
/* 033 */ BLOCK_ID_MPI_Group_free,
/* 034 */ BLOCK_ID_MPI_Group_incl,
/* 035 */ BLOCK_ID_MPI_Group_intersection,
/* 036 */ BLOCK_ID_MPI_Group_rank,
/* 037 */ BLOCK_ID_MPI_Group_range_excl,
/* 038 */ BLOCK_ID_MPI_Group_range_incl,
/* 039 */ BLOCK_ID_MPI_Group_size,
/* 040 */ BLOCK_ID_MPI_Group_translate_ranks,
/* 041 */ BLOCK_ID_MPI_Group_union,
/* 042 */ BLOCK_ID_MPI_Group_compare,
/* 043 */ BLOCK_ID_MPI_Intercomm_create,
/* 044 */ BLOCK_ID_MPI_Intercomm_merge,
/* 045 */ BLOCK_ID_MPI_Keyval_free,
/* 046 */ BLOCK_ID_MPI_Keyval_create,
/* 047 */ BLOCK_ID_MPI_Abort,
/* 048 */ BLOCK_ID_MPI_Error_class,
/* 049 */ BLOCK_ID_MPI_Errhandler_create,
/* 050 */ BLOCK_ID_MPI_Errhandler_free,
/* 051 */ BLOCK_ID_MPI_Errhandler_get,
/* 052 */ BLOCK_ID_MPI_Error_string,
/* 053 */ BLOCK_ID_MPI_Errhandler_set,
/* 054 */ BLOCK_ID_MPI_Finalize,
/* 055 */ BLOCK_ID_MPI_Get_processor_name,
/* 056 */ BLOCK_ID_MPI_Init,
/* 057 */ BLOCK_ID_MPI_Initialized,
/* 058 */ BLOCK_ID_MPI_Wtick,
/* 059 */ BLOCK_ID_MPI_Wtime,
/* 060 */ BLOCK_ID_MPI_Address,
/* 061 */ BLOCK_ID_MPI_Bsend,
/* 062 */ BLOCK_ID_MPI_Bsend_init,
/* 063 */ BLOCK_ID_MPI_Buffer_attach,
/* 064 */ BLOCK_ID_MPI_Buffer_detach,
/* 065 */ BLOCK_ID_MPI_Cancel,
/* 066 */ BLOCK_ID_MPI_Request_free,
/* 067 */ BLOCK_ID_MPI_Recv_init,
/* 068 */ BLOCK_ID_MPI_Send_init,
/* 069 */ BLOCK_ID_MPI_Get_count,
/* 070 */ BLOCK_ID_MPI_Get_elements,
/* 071 */ BLOCK_ID_MPI_Ibsend,
/* 072 */ BLOCK_ID_MPI_Iprobe,
/* 073 */ BLOCK_ID_MPI_Irecv,
/* 074 */ BLOCK_ID_MPI_Irsend,
/* 075 */ BLOCK_ID_MPI_Isend,
/* 076 */ BLOCK_ID_MPI_Issend,
/* 077 */ BLOCK_ID_MPI_Pack,
/* 078 */ BLOCK_ID_MPI_Pack_size,
/* 079 */ BLOCK_ID_MPI_Probe,
/* 080 */ BLOCK_ID_MPI_Recv,
/* 081 */ BLOCK_ID_MPI_Rsend,
/* 082 */ BLOCK_ID_MPI_Rsend_init,
/* 083 */ BLOCK_ID_MPI_Send,
/* 084 */ BLOCK_ID_MPI_Sendrecv,
/* 085 */ BLOCK_ID_MPI_Sendrecv_replace,
/* 086 */ BLOCK_ID_MPI_Ssend,
/* 087 */ BLOCK_ID_MPI_Ssend_init,
/* 088 */ BLOCK_ID_MPI_Start,
/* 089 */ BLOCK_ID_MPI_Startall,
/* 090 */ BLOCK_ID_MPI_Test,
/* 091 */ BLOCK_ID_MPI_Testall,
/* 092 */ BLOCK_ID_MPI_Testany,
/* 093 */ BLOCK_ID_MPI_Test_cancelled,
/* 094 */ BLOCK_ID_MPI_Test_some,
/* 095 */ BLOCK_ID_MPI_Type_commit,
/* 096 */ BLOCK_ID_MPI_Type_contiguous,
/* 097 */ BLOCK_ID_MPI_Type_extent,
/* 098 */ BLOCK_ID_MPI_Type_free,
/* 099 */ BLOCK_ID_MPI_Type_hindexed,
/* 100 */ BLOCK_ID_MPI_Type_hvector,
/* 101 */ BLOCK_ID_MPI_Type_indexed,
/* 102 */ BLOCK_ID_MPI_Type_lb,
/* 103 */ BLOCK_ID_MPI_Type_size,
/* 104 */ BLOCK_ID_MPI_Type_struct,
/* 105 */ BLOCK_ID_MPI_Type_ub,
/* 106 */ BLOCK_ID_MPI_Type_vector,
/* 107 */ BLOCK_ID_MPI_Unpack,
/* 108 */ BLOCK_ID_MPI_Wait,
/* 109 */ BLOCK_ID_MPI_Waitall,
/* 110 */ BLOCK_ID_MPI_Waitany,
/* 111 */ BLOCK_ID_MPI_Waitsome,
/* 112 */ BLOCK_ID_MPI_Cart_coords,
/* 113 */ BLOCK_ID_MPI_Cart_create,
/* 114 */ BLOCK_ID_MPI_Cart_get,
/* 115 */ BLOCK_ID_MPI_Cart_map,
/* 116 */ BLOCK_ID_MPI_Cart_rank,
/* 117 */ BLOCK_ID_MPI_Cart_shift,
/* 118 */ BLOCK_ID_MPI_Cart_sub,
/* 119 */ BLOCK_ID_MPI_Cartdim_get,
/* 120 */ BLOCK_ID_MPI_Dims_create,
/* 121 */ BLOCK_ID_MPI_Graph_get,
/* 122 */ BLOCK_ID_MPI_Graph_map,
/* 123 */ BLOCK_ID_MPI_Graph_create,
/* 124 */ BLOCK_ID_MPI_Graph_neighbors,
/* 125 */ BLOCK_ID_MPI_Graphdims_get,
/* 126 */ BLOCK_ID_MPI_Graph_neighbors_count,
/* 127 */ BLOCK_ID_MPI_Topo_test,
/* 128 */ BLOCK_ID_TRACE_ON,
/* 129 */ BLOCK_ID_IO_Read,
/* 130 */ BLOCK_ID_IO_Write,
/* 131 */ BLOCK_ID_IO,
/* 132 */ BLOCK_ID_MPI_Win_create,
/* 133 */ BLOCK_ID_MPI_Win_free,
/* 134 */ BLOCK_ID_MPI_Put,
/* 135 */ BLOCK_ID_MPI_Get,
/* 136 */ BLOCK_ID_MPI_Accumulate,
/* 137 */ BLOCK_ID_MPI_Win_fence,
/* 138 */ BLOCK_ID_MPI_Win_complete,
/* 139 */ BLOCK_ID_MPI_Win_start,
/* 140 */ BLOCK_ID_MPI_Win_post,
/* 141 */ BLOCK_ID_MPI_Win_wait,
/* 142 */ BLOCK_ID_MPI_Win_test,
/* 143 */ BLOCK_ID_MPI_Win_lock,
/* 144 */ BLOCK_ID_MPI_Win_unlock,
/* 145 */ BLOCK_ID_MPI_Init_thread,
/* 146 */ BLOCK_ID_LAPI_Init,
/* 147 */ BLOCK_ID_LAPI_Term,
/* 148 */ BLOCK_ID_LAPI_Put,
/* 149 */ BLOCK_ID_LAPI_Get,
/* 150 */ BLOCK_ID_LAPI_Fence,
/* 151 */ BLOCK_ID_LAPI_Gfence,
/* 152 */ BLOCK_ID_LAPI_Address_init,
/* 153 */ BLOCK_ID_LAPI_Amsend,
/* 154 */ BLOCK_ID_LAPI_Rmw,
/* 155 */ BLOCK_ID_LAPI_Waitcntr
/* 156 */ BLOCK_ID_MPI_Reduce_scatter_block,
/* 157 */ BLOCK_ID_MPI_Alltoallw,
/* 158 */ BLOCK_ID_MPI_Get_accumulate
} DimBlock;
#endif
/* ==========================================================================
==== MPI Dimemas Collective Communications Identifiers
========================================================================== */
typedef enum
{
GLOP_ID_NULL = -1,
GLOP_ID_MPI_Barrier = 0,
GLOP_ID_MPI_Bcast = 1,
GLOP_ID_MPI_Gather = 2,
GLOP_ID_MPI_Gatherv = 3,
GLOP_ID_MPI_Scatter = 4,
GLOP_ID_MPI_Scatterv = 5,
GLOP_ID_MPI_Allgather = 6,
GLOP_ID_MPI_Allgatherv = 7,
GLOP_ID_MPI_Alltoall = 8,
GLOP_ID_MPI_Alltoallv = 9,
GLOP_ID_MPI_Reduce = 10,
GLOP_ID_MPI_Allreduce = 11,
GLOP_ID_MPI_Reduce_scatter = 12,
GLOP_ID_MPI_Scan = 13,
GLOP_ID_MPI_Reduce_scatter_block = 14,
GLOP_ID_MPI_Alltoallw = 15
} DimCollectiveOp;
typedef enum
{
MPI_TYPE = 1,
MPI_COMM_ALIAS_TYPE,
MISC_TYPE,
OPENMP_TYPE,
PTHREAD_TYPE,
CUDA_TYPE,
OPENCL_TYPE,
OPENSHMEM_TYPE,
JAVA_TYPE
} EventType_t;
/* File descriptor classifications */
typedef enum
{
DESCRIPTOR_TYPE_UNKNOWN = 0, /* unknown */
DESCRIPTOR_TYPE_REGULARFILE, /* regular file */
DESCRIPTOR_TYPE_SOCKET, /* socket */
DESCRIPTOR_TYPE_FIFO_PIPE, /* fifo or pipe */
DESCRIPTOR_TYPE_ATTY /* connected to the terminal? */
} ExtraeDescriptorType_t;
EventType_t getEventType (unsigned EvType, unsigned *Type);
#endif /* __EVENTS_H_INCLUDED__ */
|
valid.mob1.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_32_112_112_32_3_3.h"
#include "gen_ukr_A4B2gemm_1_32_112_112_32_3_3.h"
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 112;
int Ny = 112;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
/* int Tc1 = 8; // 1 8 16 32 48 */
/* int Txy3 = 12*8; // 18, 36, 72, 144 */
/* int Tf2 = 32; // 32, 64, 128 ,256 */
#pragma omp barrier// begin push button generated block
for(int c5=0;c5<32+0;c5+=32)
{
for(int f5=0;f5<32+0;f5+=32)
{
for(int xy5=0;xy5<12544+0;xy5+=12544)
{
for(int c4=c5;c4<min(32, 32+c5);c4+=32)
{
for(int xy4=xy5;xy4<min(12544, 12544+xy5);xy4+=12544)
{
for(int f4=f5;f4<min(32, 32+f5);f4+=Tf2)
{
for(int xy3=xy4;xy3<min(12544, 12544+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(32, Tf2+f4);f3+=Tf2)
{
for(int c3=c4;c3<min(32, 32+c4);c3+=Tc1)
{
for(int xy2=xy3;xy2<min(12544, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(32, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(32, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(32, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(12544, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(32, 16+f2);f1+=16)
{
int ctile=min(Tc1, 32-c1);
int x1=xy1/112;
int y1=xy1%112/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*415872+c1_1*12996+1*x1*114+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*4608+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*401408+of1_1*12544+x1*112+y1*1+of1_2*1;
if(112-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(112*112-xy1>=6){
for(int sti=112-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=112-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
par_strength.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.19 $
***********************************************************************EHEADER*/
/******************************************************************************
*
*****************************************************************************/
/* following should be in a header file */
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
Generates strength matrix
Notes:
\begin{itemize}
\item The underlying matrix storage scheme is a hypre_ParCSR matrix.
\item The routine returns the following:
\begin{itemize}
\item S - a ParCSR matrix representing the "strength matrix". This is
used in the coarsening and interpolation routines.
\end{itemize}
\item The graph of the "strength matrix" for A is a subgraph of the
graph of A, but requires nonsymmetric storage even if A is
symmetric. This is because of the directional nature of the
"strength of dependence" notion (see below). Since we are using
nonsymmetric storage for A right now, this is not a problem. If we
ever add the ability to store A symmetrically, then we could store
the strength graph as floats instead of doubles to save space.
\item This routine currently "compresses" the strength matrix. We
should consider the possibility of defining this matrix to have the
same "nonzero structure" as A. To do this, we could use the same
A\_i and A\_j arrays, and would need only define the S\_data array.
There are several pros and cons to discuss.
\end{itemize}
Terminology:
\begin{itemize}
\item Ruge's terminology: A point is "strongly connected to" $j$, or
"strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
\item Here, we retain some of this terminology, but with a more
generalized notion of "strength". We also retain the "natural"
graph notation for representing the directed graph of a matrix.
That is, the nonzero entry $a_ij$ is represented as: i --> j. In
the strength matrix, S, the entry $s_ij$ is also graphically denoted
as above, and means both of the following:
\begin{itemize}
\item $i$ "depends on" $j$ with "strength" $s_ij$
\item $j$ "influences" $i$ with "strength" $s_ij$
\end{itemize}
\end{itemize}
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param A [IN]
coefficient matrix
@param strength_threshold [IN]
threshold parameter used to define strength
@param max_row_sum [IN]
parameter used to modify definition of strength for diagonal dominant matrices
@param S_ptr [OUT]
strength matrix
@see */
/*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateS(hypre_ParCSRMatrix *A,
double strength_threshold,
double max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = NULL;
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *S;
hypre_CSRMatrix *S_diag;
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
/* double *S_diag_data; */
hypre_CSRMatrix *S_offd;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Int *S_offd_j = NULL;
/* double *S_offd_data; */
double diag, row_scale, row_sum;
HYPRE_Int i, jA, jS;
HYPRE_Int ierr = 0;
HYPRE_Int *dof_func_offd;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data;
HYPRE_Int index, start, j;
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = 1, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
num_nonzeros_diag = A_diag_i[num_variables];
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
A_offd_i = hypre_CSRMatrixI(A_offd);
num_nonzeros_offd = A_offd_i[num_variables];
S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
/* row_starts is owned by A, col_starts = row_starts */
hypre_ParCSRMatrixSetRowStartsOwner(S,0);
S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_offd_i = hypre_CSRMatrixI(S_offd);
dof_func_offd = NULL;
if (num_cols_offd)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
if (num_functions > 1)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
}
/*-------------------------------------------------------------------
* Get the dof_func data for the off-processor columns
*-------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_functions > 1)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int,hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data);
}
/* give S same nonzero structure as A */
hypre_ParCSRMatrixCopy(A,S,0);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_variables; i++)
{
diag = A_diag_data[A_diag_i[i]];
/* compute scaling factor and row sum */
row_scale = 0.0;
row_sum = diag;
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
row_sum = fabs( row_sum / diag );
/* compute row entries of S */
S_diag_j[A_diag_i[i]] = -1;
if ((row_sum > max_row_sum) && (max_row_sum < 1.0))
{
/* make all dependencies weak */
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
S_diag_j[jA] = -1;
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
S_offd_j[jA] = -1;
}
}
else
{
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
}
}
}
/*--------------------------------------------------------------
* "Compress" the strength matrix.
*
* NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
*
* NOTE: This "compression" section of code may be removed, and
* coarsening will still be done correctly. However, the routine
* that builds interpolation would have to be modified first.
*----------------------------------------------------------------*/
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (S_diag_j[jA] > -1)
{
S_diag_j[jS] = S_diag_j[jA];
jS++;
}
}
}
S_diag_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_diag) = jS;
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (S_offd_j[jA] > -1)
{
S_offd_j[jS] = S_offd_j[jA];
jS++;
}
}
}
S_offd_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_offd) = jS;
hypre_ParCSRMatrixCommPkg(S) = NULL;
*S_ptr = S;
hypre_TFree(dof_func_offd);
return (ierr);
}
/*==========================================================================*/
/*==========================================================================*/
/**
Generates strength matrix
Notes:
\begin{itemize}
\item The underlying matrix storage scheme is a hypre_ParCSR matrix.
\item The routine returns the following:
\begin{itemize}
\item S - a ParCSR matrix representing the "strength matrix". This is
used in the coarsening and interpolation routines.
\end{itemize}
\item The graph of the "strength matrix" for A is a subgraph of the
graph of A, but requires nonsymmetric storage even if A is
symmetric. This is because of the directional nature of the
"strength of dependence" notion (see below). Since we are using
nonsymmetric storage for A right now, this is not a problem. If we
ever add the ability to store A symmetrically, then we could store
the strength graph as floats instead of doubles to save space.
\item This routine currently "compresses" the strength matrix. We
should consider the possibility of defining this matrix to have the
same "nonzero structure" as A. To do this, we could use the same
A\_i and A\_j arrays, and would need only define the S\_data array.
There are several pros and cons to discuss.
\end{itemize}
Terminology:
\begin{itemize}
\item Ruge's terminology: A point is "strongly connected to" $j$, or
"strongly depends on" $j$, if $|a_ij| >= \theta max_{l != j} |a_il|$.
\item Here, we retain some of this terminology, but with a more
generalized notion of "strength". We also retain the "natural"
graph notation for representing the directed graph of a matrix.
That is, the nonzero entry $a_ij$ is represented as: i --> j. In
the strength matrix, S, the entry $s_ij$ is also graphically denoted
as above, and means both of the following:
\begin{itemize}
\item $i$ "depends on" $j$ with "strength" $s_ij$
\item $j$ "influences" $i$ with "strength" $s_ij$
\end{itemize}
\end{itemize}
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param A [IN]
coefficient matrix
@param strength_threshold [IN]
threshold parameter used to define strength
@param max_row_sum [IN]
parameter used to modify definition of strength for diagonal dominant matrices
@param S_ptr [OUT]
strength matrix
@see */
/*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A,
double strength_threshold,
double max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = NULL;
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *S;
hypre_CSRMatrix *S_diag;
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
/* double *S_diag_data; */
hypre_CSRMatrix *S_offd;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Int *S_offd_j = NULL;
/* double *S_offd_data; */
double diag, row_scale, row_sum;
HYPRE_Int i, jA, jS;
HYPRE_Int ierr = 0;
HYPRE_Int *dof_func_offd;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data;
HYPRE_Int index, start, j;
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = 1, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
num_nonzeros_diag = A_diag_i[num_variables];
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
A_offd_i = hypre_CSRMatrixI(A_offd);
num_nonzeros_offd = A_offd_i[num_variables];
S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
/* row_starts is owned by A, col_starts = row_starts */
hypre_ParCSRMatrixSetRowStartsOwner(S,0);
S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_offd_i = hypre_CSRMatrixI(S_offd);
dof_func_offd = NULL;
if (num_cols_offd)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
if (num_functions > 1)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
}
/*-------------------------------------------------------------------
* Get the dof_func data for the off-processor columns
*-------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_functions > 1)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int,hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data);
}
/* give S same nonzero structure as A */
hypre_ParCSRMatrixCopy(A,S,0);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_variables; i++)
{
diag = A_diag_data[A_diag_i[i]];
/* compute scaling factor and row sum */
row_scale = 0.0;
row_sum = diag;
if (num_functions > 1)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
row_sum += fabs(A_diag_data[jA]);
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
row_sum += fabs(A_offd_data[jA]);
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
row_sum += fabs(A_diag_data[jA]);
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
row_sum += fabs(A_offd_data[jA]);
}
}
row_sum = fabs( row_sum / diag );
/* compute row entries of S */
S_diag_j[A_diag_i[i]] = -1;
if ((row_sum > max_row_sum) && (max_row_sum < 1.0))
{
/* make all dependencies weak */
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
S_diag_j[jA] = -1;
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
S_offd_j[jA] = -1;
}
}
else
{
if (num_functions > 1)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
}
}
/*--------------------------------------------------------------
* "Compress" the strength matrix.
*
* NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
*
* NOTE: This "compression" section of code may be removed, and
* coarsening will still be done correctly. However, the routine
* that builds interpolation would have to be modified first.
*----------------------------------------------------------------*/
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (S_diag_j[jA] > -1)
{
S_diag_j[jS] = S_diag_j[jA];
jS++;
}
}
}
S_diag_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_diag) = jS;
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (S_offd_j[jA] > -1)
{
S_offd_j[jS] = S_offd_j[jA];
jS++;
}
}
}
S_offd_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_offd) = jS;
hypre_ParCSRMatrixCommPkg(S) = NULL;
*S_ptr = S;
hypre_TFree(dof_func_offd);
return (ierr);
}
/*--------------------------------------------------------------------------*/
/* Build a communication package for the strength matrix S by pruning the
 * communication package of A: only the off-processor columns of A that
 * actually appear in S_offd are kept.  S_offd_j is renumbered in place to
 * the compressed column numbering, a compressed col_map_offd is installed
 * on S, and *col_offd_S_to_A_ptr returns the map from each compressed
 * off-diagonal column of S back to the corresponding off-diagonal column
 * of A.  hypre_CSRMatrixNumCols(S_offd) is updated to the new count. */
HYPRE_Int
hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *S,
                              HYPRE_Int **col_offd_S_to_A_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Status *status;
   hypre_MPI_Request *requests;
   hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommPkg *comm_pkg_S;
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts_A =
      hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int *send_procs_A =
      hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts_A =
      hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int *recv_procs_S;
   HYPRE_Int *recv_vec_starts_S;
   HYPRE_Int *send_procs_S;
   HYPRE_Int *send_map_starts_S;
   HYPRE_Int *send_map_elmts_S;
   HYPRE_Int *col_offd_S_to_A;
   HYPRE_Int *S_marker;
   HYPRE_Int *send_change;
   HYPRE_Int *recv_change;
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int num_cols_offd_S;
   HYPRE_Int i, j, jcol;
   HYPRE_Int proc, cnt, proc_cnt, total_nz;
   HYPRE_Int first_row;
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int num_sends_S;
   HYPRE_Int num_recvs_S;
   HYPRE_Int num_nonzeros;
   num_nonzeros = S_offd_i[num_variables];
   /* Mark with 0 every off-diagonal column of A that occurs in S_offd;
    * untouched entries stay -1 (column dropped from S). */
   S_marker = NULL;
   if (num_cols_offd_A)
      S_marker = hypre_CTAlloc(HYPRE_Int,num_cols_offd_A);
   for (i=0; i < num_cols_offd_A; i++)
      S_marker[i] = -1;
   for (i=0; i < num_nonzeros; i++)
   {
      jcol = S_offd_j[i];
      S_marker[jcol] = 0;
   }
   /* Assign compressed column numbers (0,1,2,...) to the surviving
    * columns, and count how many receive partners still contribute
    * at least one column (num_recvs_S). */
   proc = 0;
   proc_cnt = 0;
   cnt = 0;
   num_recvs_S = 0;
   for (i=0; i < num_recvs_A; i++)
   {
      for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
      {
         if (!S_marker[j])
         {
            S_marker[j] = cnt;
            cnt++;
            proc = 1;
         }
      }
      if (proc) {num_recvs_S++; proc = 0;}
   }
   num_cols_offd_S = cnt;
   recv_change = NULL;
   recv_procs_S = NULL;
   send_change = NULL;
   /* S takes ownership of a freshly compressed column map below */
   if (col_map_offd_S) hypre_TFree(col_map_offd_S);
   col_map_offd_S = NULL;
   col_offd_S_to_A = NULL;
   if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A);
   if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A);
   if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S);
   recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1);
   if (num_cols_offd_S)
   {
      col_map_offd_S = hypre_CTAlloc(HYPRE_Int,num_cols_offd_S);
      col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int,num_cols_offd_S);
   }
   if (num_cols_offd_S < num_cols_offd_A)
   {
      /* Some columns were dropped: renumber S_offd_j in place to the
       * compressed numbering, then rebuild the receive-side arrays and
       * record in recv_change[i] how many columns were dropped for
       * receive partner i (sent to that partner below). */
      for (i=0; i < num_nonzeros; i++)
      {
         jcol = S_offd_j[i];
         S_offd_j[i] = S_marker[jcol];
      }
      proc = 0;
      proc_cnt = 0;
      cnt = 0;
      recv_vec_starts_S[0] = 0;
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            if (S_marker[j] != -1)
            {
               col_map_offd_S[cnt] = col_map_offd_A[j];
               col_offd_S_to_A[cnt++] = j;
               proc = 1;
            }
         }
         recv_change[i] = j-cnt-recv_vec_starts_A[i]
                          +recv_vec_starts_S[proc_cnt];
         if (proc)
         {
            recv_procs_S[proc_cnt++] = recv_procs_A[i];
            recv_vec_starts_S[proc_cnt] = cnt;
            proc = 0;
         }
      }
   }
   else
   {
      /* Nothing dropped: S's receive side is a straight copy of A's. */
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            col_map_offd_S[j] = col_map_offd_A[j];
            col_offd_S_to_A[j] = j;
         }
         recv_procs_S[i] = recv_procs_A[i];
         recv_vec_starts_S[i] = recv_vec_starts_A[i];
      }
      recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A];
   }
   /* Tell each send partner how many of its columns we dropped
    * (recv_change goes out, send_change comes back). */
   requests = hypre_CTAlloc(hypre_MPI_Request,num_sends_A+num_recvs_A);
   j=0;
   for (i=0; i < num_sends_A; i++)
      hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i],
                      0,comm,&requests[j++]);
   for (i=0; i < num_recvs_A; i++)
      hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i],
                      0,comm,&requests[j++]);
   status = hypre_CTAlloc(hypre_MPI_Status,j);
   hypre_MPI_Waitall(j,requests,status);
   hypre_TFree(status);
   hypre_TFree(requests);
   /* Prune the send side: partner i survives unless all of its entries
    * were dropped; total_nz is the remaining send-map length. */
   num_sends_S = 0;
   total_nz = send_map_starts_A[num_sends_A];
   for (i=0; i < num_sends_A; i++)
   {
      if (send_change[i])
      {
         if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i])
            num_sends_S++;
      }
      else
         num_sends_S++;
      total_nz -= send_change[i];
   }
   send_procs_S = NULL;
   if (num_sends_S)
      send_procs_S = hypre_CTAlloc(HYPRE_Int,num_sends_S);
   send_map_starts_S = hypre_CTAlloc(HYPRE_Int,num_sends_S+1);
   send_map_elmts_S = NULL;
   if (total_nz)
      send_map_elmts_S = hypre_CTAlloc(HYPRE_Int,total_nz);
   proc = 0;
   proc_cnt = 0;
   for (i=0; i < num_sends_A; i++)
   {
      cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i];
      if (cnt)
      {
         send_procs_S[proc_cnt++] = send_procs_A[i];
         send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt;
      }
   }
   /* Assemble the new communication package for S. */
   comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
   hypre_ParCSRCommPkgComm(comm_pkg_S) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S;
   hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S;
   /* Job 12 runs the exchange in the reverse direction (receive side to
    * send side), filling send_map_elmts_S with the global column indices
    * each partner still needs from us. */
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_S, col_map_offd_S,
                                              send_map_elmts_S);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* Convert the received global indices to local row indices. */
   first_row = hypre_ParCSRMatrixFirstRowIndex(A);
   if (first_row)
      for (i=0; i < send_map_starts_S[num_sends_S]; i++)
         send_map_elmts_S[i] -= first_row;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S;
   hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S;
   hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
   hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S;
   hypre_TFree(S_marker);
   hypre_TFree(send_change);
   hypre_TFree(recv_change);
   *col_offd_S_to_A_ptr = col_offd_S_to_A;
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points
* for second coarsening pass in aggressive coarsening (S*S+2S)
*--------------------------------------------------------------------------*/
/* Create the second-pass strength matrix C = S*S + 2S restricted to the
 * C-points of the first coarsening pass (aggressive coarsening).  The
 * integer "data" arrays of C count the number of length-<=2 strong paths
 * between two C-points; only entries with at least num_paths paths are
 * kept in the final pattern.  C-points that end up with an empty row in
 * C are re-marked CF_marker[i] = 2.  *C_ptr receives the new matrix,
 * which shares (but does not own) coarse_row_starts. */
HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker,
   HYPRE_Int num_paths, HYPRE_Int *coarse_row_starts, hypre_ParCSRMatrix **C_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag);
   HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
   hypre_ParCSRMatrix *S2;
   HYPRE_Int *col_map_offd_C = NULL;
   hypre_CSRMatrix *C_diag;
   HYPRE_Int *C_diag_data = NULL;   /* path counts, diag part */
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j = NULL;
   hypre_CSRMatrix *C_offd;
   HYPRE_Int *C_offd_data=NULL;     /* path counts, offd part */
   HYPRE_Int *C_offd_i;
   HYPRE_Int *C_offd_j=NULL;
   HYPRE_Int C_diag_size;
   HYPRE_Int C_offd_size;
   HYPRE_Int num_cols_offd_C = 0;
   HYPRE_Int *S_ext_diag_i = NULL;  /* external rows of S, split into the   */
   HYPRE_Int *S_ext_diag_j = NULL;  /* locally-owned coarse range (diag)    */
   HYPRE_Int S_ext_diag_size = 0;
   HYPRE_Int *S_ext_offd_i = NULL;  /* ... and the rest (offd)              */
   HYPRE_Int *S_ext_offd_j = NULL;
   HYPRE_Int S_ext_offd_size = 0;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *S_marker = NULL;      /* per-row visited marks, diag columns  */
   HYPRE_Int *S_marker_offd = NULL; /* per-row visited marks, offd columns  */
   HYPRE_Int *temp = NULL;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int *fine_to_coarse_offd = NULL;
   HYPRE_Int *map_S_to_C = NULL;
   HYPRE_Int num_sends = 0;
   HYPRE_Int num_recvs = 0;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *tmp_send_map_starts = NULL;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *tmp_recv_vec_starts = NULL;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_Int i, j, k;
   HYPRE_Int i1, i2, i3;
   HYPRE_Int jj1, jj2, jcol, jrow, j_cnt;
   HYPRE_Int jj_count_diag, jj_count_offd;
   HYPRE_Int jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int cnt, cnt_offd, cnt_diag;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int value, index;
   HYPRE_Int num_coarse;
   HYPRE_Int num_coarse_offd;
   HYPRE_Int num_nonzeros;
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd;
   HYPRE_Int global_num_coarse;
   HYPRE_Int my_first_cpt, my_last_cpt;
   HYPRE_Int *S_int_i = NULL;
   HYPRE_Int *S_int_j = NULL;
   HYPRE_Int *S_ext_i = NULL;
   HYPRE_Int *S_ext_j = NULL;
   /*-----------------------------------------------------------------------
    * Extract S_ext, i.e. portion of B that is stored on neighbor procs
    * and needed locally for matrix matrix product
    *-----------------------------------------------------------------------*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: coarse_row_starts holds only this rank's range;
    * the last rank knows the global coarse count and broadcasts it. */
   my_first_cpt = coarse_row_starts[0];
   my_last_cpt = coarse_row_starts[1]-1;
   if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1];
   hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = coarse_row_starts[my_id];
   my_last_cpt = coarse_row_starts[my_id+1]-1;
   global_num_coarse = coarse_row_starts[num_procs];
#endif
   if (num_cols_offd_S)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S);
   }
   if (num_cols_diag_S) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, num_cols_diag_S);
   /* Number the local C-points consecutively (global numbering for the
    * moment; shifted back to local after the exchange below). */
   num_coarse = 0;
   for (i=0; i < num_cols_diag_S; i++)
   {
      if (CF_marker[i] > 0)
      {
         fine_to_coarse[i] = num_coarse + my_first_cpt;
         num_coarse++;
      }
      else
      {
         fine_to_coarse[i] = -1;
      }
   }
   if (num_procs > 1)
   {
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(S);
         comm_pkg = hypre_ParCSRMatrixCommPkg(S);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      /* Exchange fine_to_coarse (global coarse numbers) for the ghost
       * columns ... */
      int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
            int_buf_data[index++]
               = fine_to_coarse[send_map_elmts[j]];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  fine_to_coarse_offd);
      /* ... and, while that is in flight, shift the local numbering back
       * to a zero-based local numbering. */
      for (i=0; i < num_cols_diag_S; i++)
         if (CF_marker[i] > 0)
            fine_to_coarse[i] -= my_first_cpt;
      hypre_ParCSRCommHandleDestroy(comm_handle);
      /* Exchange CF_marker for the ghost columns. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            int_buf_data[index++] = CF_marker[send_map_elmts[j]];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 CF_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
      S_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1);
      S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1);
      /*--------------------------------------------------------------------------
       * generate S_int_i through adding number of coarse row-elements of offd and diag
       * for corresponding rows. S_int_i[j+1] contains the number of coarse elements of
       * a row j (which is determined through send_map_elmts)
       *--------------------------------------------------------------------------*/
      S_int_i[0] = 0;
      j_cnt = 0;
      num_nonzeros = 0;
      for (i=0; i < num_sends; i++)
      {
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            jrow = send_map_elmts[j];
            index = 0;
            for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++)
            {
               if (CF_marker[S_diag_j[k]] > 0) index++;
            }
            for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++)
            {
               if (CF_marker_offd[S_offd_j[k]] > 0) index++;
            }
            S_int_i[++j_cnt] = index;
            num_nonzeros += S_int_i[j_cnt];
         }
      }
      /*--------------------------------------------------------------------------
       * initialize communication
       *--------------------------------------------------------------------------*/
      /* Row-count exchange overlaps with the packing of S_int_j below. */
      if (num_procs > 1)
         comm_handle =
            hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]);
      if (num_nonzeros) S_int_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros);
      tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1);
      tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1);
      /* Pack the (global) coarse column indices of each sent row. */
      tmp_send_map_starts[0] = 0;
      j_cnt = 0;
      for (i=0; i < num_sends; i++)
      {
         for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            jrow = send_map_elmts[j];
            for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++)
            {
               if (CF_marker[S_diag_j[k]] > 0)
                  S_int_j[j_cnt++] = fine_to_coarse[S_diag_j[k]]+my_first_cpt;
            }
            for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++)
            {
               if (CF_marker_offd[S_offd_j[k]] > 0)
                  S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]];
            }
         }
         tmp_send_map_starts[i+1] = j_cnt;
      }
      /* Temporary comm package with the coarse-only send/recv lengths;
       * it shares the proc lists with comm_pkg (do not free those). */
      tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
      hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
      hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
      hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
      hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) =
         hypre_ParCSRCommPkgSendProcs(comm_pkg);
      hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) =
         hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      /*--------------------------------------------------------------------------
       * after communication exchange S_ext_i[j+1] contains the number of coarse elements
       * of a row j !
       * evaluate S_ext_i and compute num_nonzeros for S_ext
       *--------------------------------------------------------------------------*/
      for (i=0; i < recv_vec_starts[num_recvs]; i++)
         S_ext_i[i+1] += S_ext_i[i];
      num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]];
      if (num_nonzeros) S_ext_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros);
      tmp_recv_vec_starts[0] = 0;
      for (i=0; i < num_recvs; i++)
         tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]];
      hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
      comm_handle = hypre_ParCSRCommHandleCreate(11,tmp_comm_pkg,S_int_j,S_ext_j);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      hypre_TFree(tmp_send_map_starts);
      hypre_TFree(tmp_recv_vec_starts);
      hypre_TFree(tmp_comm_pkg);
      hypre_TFree(S_int_i);
      hypre_TFree(S_int_j);
      /* Split the external rows into locally-owned coarse columns (diag,
       * shifted to local numbering) and foreign ones (offd, global). */
      S_ext_diag_size = 0;
      S_ext_offd_size = 0;
      for (i=0; i < num_cols_offd_S; i++)
      {
         for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
         {
            if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt)
               S_ext_offd_size++;
            else
               S_ext_diag_size++;
         }
      }
      S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1);
      S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1);
      if (S_ext_diag_size)
      {
         S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size);
      }
      if (S_ext_offd_size)
      {
         S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size);
      }
      cnt_offd = 0;
      cnt_diag = 0;
      cnt = 0;
      num_coarse_offd = 0;
      for (i=0; i < num_cols_offd_S; i++)
      {
         if (CF_marker_offd[i] > 0) num_coarse_offd++;
         for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
         {
            i1 = S_ext_j[j];
            if (i1 < my_first_cpt || i1 > my_last_cpt)
               S_ext_offd_j[cnt_offd++] = i1;
            else
               S_ext_diag_j[cnt_diag++] = i1 - my_first_cpt;
         }
         S_ext_diag_i[++cnt] = cnt_diag;
         S_ext_offd_i[cnt] = cnt_offd;
      }
      hypre_TFree(S_ext_i);
      hypre_TFree(S_ext_j);
      /* Build col_map_offd_C: sorted unique list of all foreign coarse
       * columns (from S_ext_offd plus the ghost C-points themselves). */
      cnt = 0;
      if (S_ext_offd_size || num_coarse_offd)
      {
         temp = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size+num_coarse_offd);
         for (i=0; i < S_ext_offd_size; i++)
            temp[i] = S_ext_offd_j[i];
         cnt = S_ext_offd_size;
         for (i=0; i < num_cols_offd_S; i++)
            if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i];
      }
      if (cnt)
      {
         /* sort, then compact duplicates in place */
         qsort0(temp, 0, cnt-1);
         num_cols_offd_C = 1;
         value = temp[0];
         for (i=1; i < cnt; i++)
         {
            if (temp[i] > value)
            {
               value = temp[i];
               temp[num_cols_offd_C++] = value;
            }
         }
      }
      if (num_cols_offd_C)
         col_map_offd_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_C);
      for (i=0; i < num_cols_offd_C; i++)
         col_map_offd_C[i] = temp[i];
      if (S_ext_offd_size || num_coarse_offd)
         hypre_TFree(temp);
      /* Renumber S_ext_offd_j to the compressed C column numbering. */
      for (i=0 ; i < S_ext_offd_size; i++)
         S_ext_offd_j[i] = hypre_BinarySearch(col_map_offd_C,
                                              S_ext_offd_j[i],
                                              num_cols_offd_C);
      if (num_cols_offd_S)
      {
         /* map_S_to_C: ghost column of S -> offd column of C (or -1 for
          * ghost F-points).  Relies on fine_to_coarse_offd being sorted
          * in the same order as col_map_offd_C. */
         map_S_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_S);
         cnt = 0;
         for (i=0; i < num_cols_offd_S; i++)
         {
            if (CF_marker_offd[i] > 0)
            {
               while (fine_to_coarse_offd[i] > col_map_offd_C[cnt])
               {
                  cnt++;
               }
               map_S_to_C[i] = cnt++;
            }
            else
            {
               map_S_to_C[i] = -1;
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate and initialize some stuff.
    *-----------------------------------------------------------------------*/
   if (num_coarse) S_marker = hypre_CTAlloc(HYPRE_Int, num_coarse);
   for (i1 = 0; i1 < num_coarse; i1++)
      S_marker[i1] = -1;
   S_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C);
   for (i1 = 0; i1 < num_cols_offd_C; i1++)
      S_marker_offd[i1] = -1;
   C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1);
   C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1);
   /*-----------------------------------------------------------------------
    * Loop over rows of S
    *-----------------------------------------------------------------------*/
   /* First (symbolic) pass: count the nonzeros of each C-row of S*S+2S.
    * S_marker[c] == i1 means coarse column c was already counted for
    * row i1. */
   cnt = 0;
   num_nonzeros_diag = 0;
   num_nonzeros_offd = 0;
   for (i1 = 0; i1 < num_cols_diag_S; i1++)
   {
      if (CF_marker[i1] > 0)
      {
         /* distance-1 C-neighbors (the 2S term) */
         for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
         {
            jcol = S_diag_j[jj1];
            if (CF_marker[jcol] > 0)
            {
               S_marker[fine_to_coarse[jcol]] = i1;
               num_nonzeros_diag++;
            }
         }
         for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
         {
            jcol = S_offd_j[jj1];
            if (CF_marker_offd[jcol] > 0)
            {
               S_marker_offd[map_S_to_C[jcol]] = i1;
               num_nonzeros_offd++;
            }
         }
         /* distance-2 C-neighbors via local intermediate points */
         for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
         {
            i2 = S_diag_j[jj1];
            for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++)
            {
               i3 = S_diag_j[jj2];
               if (CF_marker[i3] > 0 && S_marker[fine_to_coarse[i3]] != i1)
               {
                  S_marker[fine_to_coarse[i3]] = i1;
                  num_nonzeros_diag++;
               }
            }
            for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++)
            {
               i3 = S_offd_j[jj2];
               if (CF_marker_offd[i3] > 0 &&
                   S_marker_offd[map_S_to_C[i3]] != i1)
               {
                  S_marker_offd[map_S_to_C[i3]] = i1;
                  num_nonzeros_offd++;
               }
            }
         }
         /* distance-2 C-neighbors via off-processor intermediates
          * (S_ext rows are already restricted to C-columns) */
         for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
         {
            i2 = S_offd_j[jj1];
            for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++)
            {
               i3 = S_ext_diag_j[jj2];
               if (S_marker[i3] != i1)
               {
                  S_marker[i3] = i1;
                  num_nonzeros_diag++;
               }
            }
            for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++)
            {
               i3 = S_ext_offd_j[jj2];
               if (S_marker_offd[i3] != i1)
               {
                  S_marker_offd[i3] = i1;
                  num_nonzeros_offd++;
               }
            }
         }
         C_diag_i[++cnt] = num_nonzeros_diag;
         C_offd_i[cnt] = num_nonzeros_offd;
      }
   }
   if (num_nonzeros_diag)
   {
      C_diag_j = hypre_CTAlloc(HYPRE_Int,num_nonzeros_diag);
      C_diag_data = hypre_CTAlloc(HYPRE_Int,num_nonzeros_diag);
   }
   if (num_nonzeros_offd)
   {
      C_offd_j = hypre_CTAlloc(HYPRE_Int,num_nonzeros_offd);
      C_offd_data = hypre_CTAlloc(HYPRE_Int,num_nonzeros_offd);
   }
   for (i1 = 0; i1 < num_coarse; i1++)
      S_marker[i1] = -1;
   for (i1 = 0; i1 < num_cols_offd_C; i1++)
      S_marker_offd[i1] = -1;
   /* Second (numeric) pass: fill C_*_j and accumulate path counts in
    * C_*_data.  Here S_marker[c] stores the position of column c in the
    * current row (valid when >= jj_row_begin_*), so repeat visits bump
    * the count instead of adding a new entry.  Distance-1 entries start
    * at 2 (the 2S term); each distance-2 path adds 1. */
   jj_count_diag = 0;
   jj_count_offd = 0;
   for (i1 = 0; i1 < num_cols_diag_S; i1++)
   {
      /*--------------------------------------------------------------------
       * Set marker for diagonal entry, C_{i1,i1} (for square matrices).
       *--------------------------------------------------------------------*/
      jj_row_begin_diag = jj_count_diag;
      jj_row_begin_offd = jj_count_offd;
      if (CF_marker[i1] > 0)
      {
         for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
         {
            jcol = S_diag_j[jj1];
            if (CF_marker[jcol] > 0)
            {
               S_marker[fine_to_coarse[jcol]] = jj_count_diag;
               C_diag_j[jj_count_diag] = fine_to_coarse[jcol];
               C_diag_data[jj_count_diag] = 2;
               jj_count_diag++;
            }
         }
         for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
         {
            jcol = S_offd_j[jj1];
            if (CF_marker_offd[jcol] > 0)
            {
               index = map_S_to_C[jcol];
               S_marker_offd[index] = jj_count_offd;
               C_offd_j[jj_count_offd] = index;
               C_offd_data[jj_count_offd] = 2;
               jj_count_offd++;
            }
         }
         for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
         {
            i2 = S_diag_j[jj1];
            for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++)
            {
               i3 = S_diag_j[jj2];
               if (CF_marker[i3] > 0)
               {
                  if (S_marker[fine_to_coarse[i3]] < jj_row_begin_diag)
                  {
                     S_marker[fine_to_coarse[i3]] = jj_count_diag;
                     C_diag_j[jj_count_diag] = fine_to_coarse[i3];
                     C_diag_data[jj_count_diag]++;
                     jj_count_diag++;
                  }
                  else
                  {
                     C_diag_data[S_marker[fine_to_coarse[i3]]]++;
                  }
               }
            }
            for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++)
            {
               i3 = S_offd_j[jj2];
               if (CF_marker_offd[i3] > 0)
               {
                  index = map_S_to_C[i3];
                  if (S_marker_offd[index] < jj_row_begin_offd)
                  {
                     S_marker_offd[index] = jj_count_offd;
                     C_offd_j[jj_count_offd] = index;
                     C_offd_data[jj_count_offd]++;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[S_marker_offd[index]]++;
                  }
               }
            }
         }
         for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
         {
            i2 = S_offd_j[jj1];
            for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++)
            {
               i3 = S_ext_diag_j[jj2];
               if (S_marker[i3] < jj_row_begin_diag)
               {
                  S_marker[i3] = jj_count_diag;
                  C_diag_j[jj_count_diag] = i3;
                  C_diag_data[jj_count_diag]++;
                  jj_count_diag++;
               }
               else
               {
                  C_diag_data[S_marker[i3]]++;
               }
            }
            for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++)
            {
               i3 = S_ext_offd_j[jj2];
               if (S_marker_offd[i3] < jj_row_begin_offd)
               {
                  S_marker_offd[i3] = jj_count_offd;
                  C_offd_j[jj_count_offd] = i3;
                  C_offd_data[jj_count_offd]++;
                  jj_count_offd++;
               }
               else
               {
                  C_offd_data[S_marker_offd[i3]]++;
               }
            }
         }
      }
   }
   /* Keep only entries with at least num_paths paths; drop the diagonal
    * (jcol != i) in the diag part.  C_*_i is rebuilt (shifted) below. */
   cnt = 0;
   for (i=0; i < num_coarse; i++)
   {
      for (j=C_diag_i[i]; j < C_diag_i[i+1]; j++)
      {
         jcol = C_diag_j[j];
         if (C_diag_data[j] >= num_paths && jcol != i)
            C_diag_j[cnt++] = jcol;
      }
      C_diag_i[i] = cnt;
   }
   if (num_nonzeros_diag) hypre_TFree(C_diag_data);
   for (i=num_coarse; i > 0; i--)
      C_diag_i[i] = C_diag_i[i-1];
   C_diag_i[0] = 0;
   cnt = 0;
   for (i=0; i < num_coarse; i++)
   {
      for (j=C_offd_i[i]; j < C_offd_i[i+1]; j++)
      {
         jcol = C_offd_j[j];
         if (C_offd_data[j] >= num_paths)
            C_offd_j[cnt++] = jcol;
      }
      C_offd_i[i] = cnt;
   }
   if (num_nonzeros_offd) hypre_TFree(C_offd_data);
   for (i=num_coarse; i > 0; i--)
      C_offd_i[i] = C_offd_i[i-1];
   C_offd_i[0] = 0;
   /* C-points whose row in C is now empty are re-marked with 2. */
   cnt = 0;
   for (i=0; i < num_cols_diag_S; i++)
   {
      if (CF_marker[i] > 0)
      {
         if (!(C_diag_i[cnt+1]-C_diag_i[cnt]) &&
             !(C_offd_i[cnt+1]-C_offd_i[cnt]))
            CF_marker[i] = 2;
         cnt++;
      }
   }
   C_diag_size = C_diag_i[num_coarse];
   C_offd_size = C_offd_i[num_coarse];
   /* Wrap the arrays into a ParCSR matrix; coarse_row_starts stays owned
    * by the caller. */
   S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse,
                                 global_num_coarse, coarse_row_starts,
                                 coarse_row_starts, num_cols_offd_C, C_diag_size, C_offd_size);
   hypre_ParCSRMatrixOwnsRowStarts(S2) = 0;
   C_diag = hypre_ParCSRMatrixDiag(S2);
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   if (num_nonzeros_diag) hypre_CSRMatrixJ(C_diag) = C_diag_j;
   C_offd = hypre_ParCSRMatrixOffd(S2);
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_ParCSRMatrixOffd(S2) = C_offd;
   if (num_cols_offd_C)
   {
      if (num_nonzeros_offd) hypre_CSRMatrixJ(C_offd) = C_offd_j;
      hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C;
   }
   /*-----------------------------------------------------------------------
    * Free various arrays
    *-----------------------------------------------------------------------*/
   hypre_TFree(S_marker);
   hypre_TFree(S_marker_offd);
   hypre_TFree(S_ext_diag_i);
   hypre_TFree(fine_to_coarse);
   if (S_ext_diag_size)
   {
      hypre_TFree(S_ext_diag_j);
   }
   hypre_TFree(S_ext_offd_i);
   if (S_ext_offd_size)
   {
      hypre_TFree(S_ext_offd_j);
   }
   if (num_cols_offd_S)
   {
      hypre_TFree(map_S_to_C);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(fine_to_coarse_offd);
   }
   *C_ptr = S2;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i;
   HYPRE_Int coarse_cnt = 0;

   /* new_CF_marker holds one verdict per first-pass C-point, consumed
    * in order of appearance. */
   for (i = 0; i < num_var; i++)
   {
      if (CF_marker[i] <= 0)
      {
         continue;   /* F-points are left untouched */
      }
      if (CF_marker[i] == 1)
      {
         /* ordinary C-point: adopt the second pass's decision */
         CF_marker[i] = new_CF_marker[coarse_cnt];
      }
      else
      {
         /* special C-point (marker > 1): force back to a plain C-point */
         CF_marker[i] = 1;
      }
      coarse_cnt++;
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening,
* but marks new F-points (previous C-points) as -2
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i;
   HYPRE_Int pos = 0;

   /* One entry of new_CF_marker per old C-point, consumed in order.
    * A C-point demoted by the second pass (-1) becomes -2 so it can be
    * distinguished from ordinary F-points; everything else stays C (1). */
   for (i = 0; i < num_var; i++)
   {
      if (CF_marker[i] <= 0)
      {
         continue;   /* leave F-points alone */
      }
      CF_marker[i] = (new_CF_marker[pos] == -1) ? -2 : 1;
      pos++;
   }
   return 0;
}
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <xgboost/data.h>
#include <vector>
#include <type_traits> // enable_if
#include "host_device_vector.h"
#include "common.h"
#include "span.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
// Device kernel: grid-stride loop over [*_range.begin(), *_range.end()).
// Each thread handles indices i, i + gridDim*blockDim, ... (via
// dh::GridStrideRange), so any grid size covers the whole range.
// `_func` is invoked as _func(i, spans...).
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories, users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses template
* argument to duplicate itself into two different types, one for CPU,
* another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and host
* compiler, the behaviour is un-defined! Because your function is NOT
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
* will merge functions with same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
private:
template <typename Functor>
struct Evaluator {
public:
Evaluator(Functor func, Range range, GPUSet devices, bool reshard) :
func_(func), range_{std::move(range)},
distribution_{std::move(GPUDistribution::Block(devices))},
reshard_{reshard} {}
Evaluator(Functor func, Range range, GPUDistribution dist,
bool reshard) :
func_(func), range_{std::move(range)}, distribution_{std::move(dist)},
reshard_{reshard} {}
/*!
* \brief Evaluate the functor with input pointers to HostDeviceVector.
*
* \tparam HDV... HostDeviceVectors type.
* \param vectors Pointers to HostDeviceVector.
*/
template <typename... HDV>
void Eval(HDV... vectors) const {
bool on_device = !distribution_.IsEmpty();
if (on_device) {
LaunchCUDA(func_, vectors...);
} else {
LaunchCPU(func_, vectors...);
}
}
private:
// CUDA UnpackHDV
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec, int _device) const {
auto span = _vec->DeviceSpan(_device);
return span;
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec, int _device) const {
auto span = _vec->ConstDeviceSpan(_device);
return span;
}
// CPU UnpackHDV
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
return Span<T> {_vec->HostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
return Span<T const> {_vec->ConstHostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
// Recursive unpack for Reshard.
template <typename T>
void UnpackReshard(GPUDistribution dist, const HostDeviceVector<T>* vector) const {
vector->Reshard(dist);
}
template <typename Head, typename... Rest>
void UnpackReshard(GPUDistribution dist,
const HostDeviceVector<Head>* _vector,
const HostDeviceVector<Rest>*... _vectors) const {
_vector->Reshard(dist);
UnpackReshard(dist, _vectors...);
}
#if defined(__CUDACC__)
  /*! \brief Launch the functor on every device in distribution_, one OpenMP
   *  thread per device, each covering its shard of the index range. */
  template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
            typename... HDV>
  void LaunchCUDA(Functor _func, HDV*... _vectors) const {
    // Make sure the data is distributed across devices before launching.
    if (reshard_)
      UnpackReshard(distribution_, _vectors...);

    GPUSet devices = distribution_.Devices();
    size_t range_size = *range_.end() - *range_.begin();

    // Extract index to deal with possible old OpenMP.
    size_t device_beg = *(devices.begin());
    size_t device_end = *(devices.end());
    // One host thread drives each device; serial when only one device.
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
    for (omp_ulong device = device_beg; device < device_end; ++device) {  // NOLINT
      // Ignore other attributes of GPUDistribution for splitting index.
      // This deals with situation like multi-class setting where
      // granularity is used in data vector.
      size_t shard_size = GPUDistribution::Block(devices).ShardSize(
          range_size, devices.Index(device));
      // Each shard handles a local [0, shard_size) index range.
      Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
      dh::safe_cuda(cudaSetDevice(device));
      // NOTE(review): the grid is sized from the FULL range end, not from
      // shard_size; over-provisioned blocks are presumably guarded inside
      // the kernel by shard_range — confirm.
      const int GRID_SIZE =
          static_cast<int>(dh::DivRoundUp(*(range_.end()), kBlockThreads));

      detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>(
          _func, shard_range, UnpackHDV(_vectors, device)...);
      dh::safe_cuda(cudaGetLastError());
      // Block until this device finishes before the host thread returns.
      dh::safe_cuda(cudaDeviceSynchronize());
    }
  }
#else
  /*! \brief Dummy funtion defined when compiling for CPU. */
  template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
            typename... HDV>
  void LaunchCUDA(Functor _func, HDV*... _vectors) const {
    // Reaching this at runtime means a GPU launch was requested from a
    // CPU-only build.
    LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
  }
#endif
template <typename... HDV>
void LaunchCPU(Functor func, HDV*... vectors) const {
auto end = *(range_.end());
#pragma omp parallel for schedule(static)
for (omp_ulong idx = 0; idx < end; ++idx) {
func(idx, UnpackHDV(vectors)...);
}
}
 private:
  /*! \brief Callable object. */
  Functor func_;
  /*! \brief Range object specifying parallel threads index range. */
  Range range_;
  /*! \brief Whether resharding for vectors is required before launch. */
  bool reshard_;
  /*! \brief Distribution describing which devices hold which data shards. */
  GPUDistribution distribution_;
};
public:
/*!
* \brief Initialize a Transform object.
*
* \tparam Functor A callable object type.
* \return A Evaluator having one method Eval.
*
* \param func A callable object, accepting a size_t thread index,
* followed by a set of Span classes.
* \param range Range object specifying parallel threads index range.
* \param devices GPUSet specifying GPUs to use, when compiling for CPU,
* this should be GPUSet::Empty().
* \param reshard Whether Reshard for HostDeviceVector is needed.
*/
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
GPUSet const devices,
bool const reshard = true) {
return Evaluator<Functor> {func, std::move(range), std::move(devices), reshard};
}
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
GPUDistribution const dist,
bool const reshard = true) {
return Evaluator<Functor> {func, std::move(range), std::move(dist), reshard};
}
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
spectral_sequence_reduction.h | /* Copyright 2013 IST Austria
Contributed by: Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include "../helpers/misc.h"
#include "../boundary_matrix.h"
namespace phat {
// Spectral-sequence-style parallel reduction of a boundary matrix.
// The columns are partitioned into `num_stripes` contiguous blocks; in each
// pass every stripe reduces its unfinished columns against one block of rows,
// so after num_stripes passes every column has seen every row block.
class spectral_sequence_reduction {
public:
    template< typename Representation >
    void operator () ( boundary_matrix< Representation >& boundary_matrix ) {

        const index nr_columns = boundary_matrix.get_num_cols();
        // lowest_one_lookup[r] = column whose pivot (lowest one) is row r,
        // or -1 if no reduced column has that pivot yet.
        std::vector< index > lowest_one_lookup( nr_columns, -1 );

        //const index num_stripes = (index) sqrt( (double)nr_columns );
        const index num_stripes = omp_get_max_threads();

        // FIX: the original initializer read
        //   ... : block_size = nr_columns / num_stripes + 1;
        // i.e. it assigned to block_size inside block_size's own
        // initializer.  The value was the intended one, but the
        // self-assignment was a typo; the plain ceiling division is meant.
        index block_size = ( nr_columns % num_stripes == 0 ) ? nr_columns / num_stripes : nr_columns / num_stripes + 1;

        std::vector< std::vector< index > > unreduced_cols_cur_pass( num_stripes );
        std::vector< std::vector< index > > unreduced_cols_next_pass( num_stripes );

        // Reduce one simplex dimension at a time, highest first.
        for( index cur_dim = boundary_matrix.get_max_dim(); cur_dim >= 1 ; cur_dim-- ) {
            // Collect the not-yet-empty columns of this dimension per stripe.
            #pragma omp parallel for schedule( guided, 1 )
            for( index cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                index col_begin = cur_stripe * block_size;
                index col_end = std::min( (cur_stripe+1) * block_size, nr_columns );
                for( index cur_col = col_begin; cur_col < col_end; cur_col++ )
                    if( boundary_matrix.get_dim( cur_col ) == cur_dim && boundary_matrix.get_max_index( cur_col ) != -1 )
                        unreduced_cols_cur_pass[ cur_stripe ].push_back( cur_col );
            }

            // In pass p, stripe s reduces against the rows of stripe (s - p).
            for( index cur_pass = 0; cur_pass < num_stripes; cur_pass++ ) {
                boundary_matrix.sync();

                // `int` loop variable kept for OpenMP implementations that
                // require a signed index.
                #pragma omp parallel for schedule( guided, 1 )
                for( int cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                    index row_begin = (cur_stripe - cur_pass) * block_size;
                    index row_end = row_begin + block_size;
                    unreduced_cols_next_pass[ cur_stripe ].clear();
                    for( index idx = 0; idx < (index)unreduced_cols_cur_pass[ cur_stripe ].size(); idx++ ) {
                        index cur_col = unreduced_cols_cur_pass[ cur_stripe ][ idx ];
                        index lowest_one = boundary_matrix.get_max_index( cur_col );
                        // Standard reduction, but only while the pivot falls
                        // inside this pass's row block.
                        while( lowest_one != -1 && lowest_one >= row_begin && lowest_one < row_end && lowest_one_lookup[ lowest_one ] != -1 ) {
                            boundary_matrix.add_to( lowest_one_lookup[ lowest_one ], cur_col );
                            lowest_one = boundary_matrix.get_max_index( cur_col );
                        }
                        if( lowest_one != -1 ) {
                            if( lowest_one >= row_begin && lowest_one < row_end ) {
                                // Pivot settled in this row block: record the
                                // pair and retire both columns.
                                lowest_one_lookup[ lowest_one ] = cur_col;
                                boundary_matrix.clear( lowest_one );
                                boundary_matrix.finalize( cur_col );
                            } else {
                                // Pivot lies in another block: try again in a
                                // later pass.
                                unreduced_cols_next_pass[ cur_stripe ].push_back( cur_col );
                            }
                        }
                    }
                    unreduced_cols_next_pass[ cur_stripe ].swap( unreduced_cols_cur_pass[ cur_stripe ] );
                }
            }
        }
    }
};
}
|
residualbased_block_builder_and_solver_with_constraints_for_chimera.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Authors: Aditya Ghantasala, https://github.com/adityaghantasala
// Navaneeth K Narayanan
// Rishith Ellath Meethal
//
#if !defined(RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER_WITH_CONSTRAINTS_FOR_CHIMERA)
#define RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER_WITH_CONSTRAINTS_FOR_CHIMERA
/* System includes */
#include <unordered_set>
#include <unordered_map>
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "includes/master_slave_constraint.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera
* @ingroup KratosChimeraApplication
* @brief Current class provides an implementation for applying the chimera constraints that is enforcing continuity.
* @details This implementation enforces continuity necessary for chimera in the following way :
*
*
*
* L = [I 0 0 ]
* [0 I 0 ]
*
* K_mod = L'KT
* F_mod = L'(F-K*g)
*
* Where T has the same definition as that of the classical master slave constraints
*
* @author Aditya Ghantasala
*/
template <class TSparseSpace,
          class TDenseSpace,
          class TLinearSolver>
class KRATOS_API(CHIMERA_APPLICATION) ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera
    : public ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
    ///@name Type Definitions
    ///@{

    /// Definition of the base class
    typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    // The size_t types
    typedef typename BaseType::IndexType IndexType;

    /// Definition of the classes from the base class
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera);

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor. Forwards the linear solver to the base builder-and-solver.
     */
    explicit ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera() = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        BaseType::Clear();
        // The base class does not know about mL, so release it here as well.
        mL.resize(0,0,false);
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera";
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    TSystemMatrixType mL; /// This is L matrix described above (at class definition)

    ///@}
    ///@name Protected operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Builds the sparsity pattern of T in the base class and mirrors
     *        it into L, which starts as a copy of T.
     */
    void ConstructMasterSlaveConstraintsStructure(ModelPart &rModelPart) override
    {
        BaseType::ConstructMasterSlaveConstraintsStructure(rModelPart);
        if (rModelPart.MasterSlaveConstraints().size() > 0)
        {
            mL = BaseType::mT;
        }
    }

    /**
     * @brief Assembles the constraint relation matrices, then places unit
     *        entries on the diagonal of L for master and inactive-slave dofs
     *        so L acts as identity on them.
     */
    void BuildMasterSlaveConstraints(ModelPart &rModelPart) override
    {
        KRATOS_TRY
        BaseType::BuildMasterSlaveConstraints(rModelPart);
        // Setting the master dofs into the T and C system
        for (auto eq_id : BaseType::mMasterIds)
        {
            mL(eq_id, eq_id) = 1.0;
        }
        // Setting inactive slave dofs in the T and C system
        for (auto eq_id : BaseType::mInactiveSlaveDofs)
        {
            mL(eq_id, eq_id) = 1.0;
        }
        KRATOS_CATCH("")
    }

    /**
     * @brief Applies the chimera constraints to the assembled system:
     *        K_mod = L' K T and F_mod = L' F (see the class documentation),
     *        then pins the slave equations with the maximum diagonal value.
     * @param pScheme The time integration scheme (unused here, kept for the
     *        base class interface).
     * @param rModelPart The model part holding the constraints.
     * @param rA The system matrix, modified in place.
     * @param rb The right hand side vector, modified in place.
     */
    void ApplyConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rb) override
    {
        KRATOS_TRY
        if (rModelPart.MasterSlaveConstraints().size() != 0)
        {
            // const added for consistency with stop_constraints below.
            const double start_constraints = OpenMPUtils::GetCurrentTime();

            BuildMasterSlaveConstraints(rModelPart);

            // We compute the transposed matrix of the global relation matrix
            TSystemMatrixType L_transpose_matrix(mL.size2(), mL.size1());
            SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(L_transpose_matrix, mL, 1.0);

            // F_mod = L' F
            TSystemVectorType b_modified(rb.size());
            TSparseSpace::Mult(L_transpose_matrix, rb, b_modified);
            TSparseSpace::Copy(b_modified, rb);
            b_modified.resize(0, false); //free memory

            // Rows of L'*A = columns of L (mL and mT share their shape, so
            // this is the same value the previous mT.size2() gave, but the
            // L-based expression matches the operation actually performed).
            TSystemMatrixType auxiliar_A_matrix(mL.size2(), rA.size2());
            SparseMatrixMultiplicationUtility::MatrixMultiplication(L_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = L_transpose * rA
            L_transpose_matrix.resize(0, 0, false); //free memory

            SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, BaseType::mT, rA); //A = auxilar * T   NOTE: here we are overwriting the old A matrix!
            auxiliar_A_matrix.resize(0, 0, false); //free memory

            // Scale for the slave diagonal entries: keeps the system well
            // conditioned relative to the rest of the matrix.
            double max_diag = 0.0;
            for (IndexType i = 0; i < rA.size1(); ++i)
            {
                max_diag = std::max(std::abs(rA(i, i)), max_diag);
            }

            // Apply diagonal values on slaves BaseType::mDofSet.size()
            #pragma omp parallel for
            for (int i = 0; i < static_cast<int>(BaseType::mSlaveIds.size()); ++i)
            {
                const IndexType slave_equation_id = BaseType::mSlaveIds[i];
                if (BaseType::mInactiveSlaveDofs.find(slave_equation_id) == BaseType::mInactiveSlaveDofs.end())
                {
                    rA(slave_equation_id, slave_equation_id) = max_diag;
                    rb[slave_equation_id] = 0.0;
                }
            }

            const double stop_constraints = OpenMPUtils::GetCurrentTime();
            KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera", this->GetEchoLevel() >= 1 )<< "Applying constraints time: " << stop_constraints - start_constraints << std::endl;
        }
        KRATOS_CATCH("")
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
}; /* Class ResidualBasedBlockBuilderAndSolverWithConstraintsForChimera */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
npdot.c | #include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/*
* numpy.dot may call unoptimized blas
*/
/*
 * C = alpha * op(A).op(B) + beta * C, dispatching the parallelisation axis
 * (k, m, or n) based on the matrix shape.  Thin wrappers around BLAS dgemm_
 * because numpy.dot may call unoptimized blas.
 *
 * trans_a / trans_b : 'N' or 'T' as in BLAS.
 * m, n, k           : dgemm dimensions.
 * lda, ldb, ldc     : leading dimensions of a, b, c.
 * offseta/b/c       : element offsets applied to the raw pointers first.
 * alpha, beta       : the usual dgemm scalars.
 */
void NPdgemm(const char trans_a, const char trans_b,
             const int m, const int n, const int k,
             const int lda, const int ldb, const int ldc,
             const int offseta, const int offsetb, const int offsetc,
             double *a, double *b, double *c,
             const double alpha, const double beta)
{
        a += offseta;
        b += offsetb;
        c += offsetc;

        size_t stride;
        int nthread = 1;
        int i, di, nblk;

        if ((k/m) > 3 && (k/n) > 3) { // parallelize k
                const double D0 = 0;
                double *cpriv;
                int ij, j, stride_b;
                /* stride to advance op(A)/op(B) along the k axis. */
                if (trans_a == 'N') {
                        stride = lda;
                } else {
                        stride = 1;
                }
                if (trans_b == 'N') {
                        stride_b = 1;
                } else {
                        stride_b = ldb;
                }
                /* Pre-apply beta to C; the per-thread dgemm below uses
                 * beta = 0 into private buffers which are then summed. */
                if (beta == 0) {
                        for (i = 0; i < n; i++) {
                                memset(c+i*ldc, 0, sizeof(double)*m);
                        }
                } else {
                        /* FIX: the loop used to read "j++, ij++", which
                         * incremented the uninitialized variable ij
                         * (undefined behavior); ij is not used here. */
                        for (i = 0; i < n; i++) {
                                for (j = 0; j < m; j++) {
                                        c[i*ldc+j] *= beta;
                                }
                        }
                }
#pragma omp parallel default(none) \
        shared(a, b, c, stride, stride_b, nthread, nblk) \
        private(i, ij, j, di, cpriv)
{
        /* nthread/nblk are written identically by every thread. */
        nthread = omp_get_num_threads();
        nblk = MAX((k+nthread-1) / nthread, 1);
        cpriv = malloc(sizeof(double) * m * n);
#pragma omp for
        for (i = 0; i < nthread; i++) {
                di = MIN(nblk, k-i*nblk);
                if (di > 0) {
                        dgemm_(&trans_a, &trans_b, &m, &n, &di,
                               &alpha, a+stride*i*nblk, &lda,
                               b+stride_b*i*nblk, &ldb,
                               &D0, cpriv, &m);
                }
        }
        /* NOTE(review): the accumulation is guarded by the di value from
         * this thread's LAST loop iteration.  With nthread iterations
         * statically scheduled over nthread threads each thread runs at most
         * one iteration, so this is consistent — but it silently relies on
         * that one-iteration-per-thread property; confirm before changing
         * the schedule or the trip count. */
#pragma omp critical
        if (di > 0) {
                for (ij = 0, i = 0; i < n; i++) {
                        for (j = 0; j < m; j++, ij++) {
                                c[i*ldc+j] += cpriv[ij];
                        }
                }
        }
        free(cpriv);
}
        } else if (m > n+4) { // parallelize m
                /* stride to advance op(A) along the m axis. */
                if (trans_a == 'N') {
                        stride = 1;
                } else {
                        stride = lda;
                }
#pragma omp parallel default(none) \
        shared(a, b, c, stride, nthread, nblk) \
        private(i, di)
{
        nthread = omp_get_num_threads();
        nblk = MAX((m+nthread-1) / nthread, 1);
        nthread = (m+nblk-1) / nblk;
#pragma omp for
        for (i = 0; i < nthread; i++) {
                di = MIN(nblk, m-i*nblk);
                if (di > 0) {
                        dgemm_(&trans_a, &trans_b, &di, &n, &k,
                               &alpha, a+stride*i*nblk, &lda, b, &ldb,
                               &beta, c+i*nblk, &ldc);
                }
        }
}
        } else { // parallelize n
                /* stride to advance op(B) along the n axis. */
                if (trans_b == 'N') {
                        stride = ldb;
                } else {
                        stride = 1;
                }
#pragma omp parallel default(none) \
        shared(a, b, c, stride, nthread, nblk) \
        private(i, di)
{
        nthread = omp_get_num_threads();
        nblk = MAX((n+nthread-1) / nthread, 1);
        nthread = (n+nblk-1) / nblk;
#pragma omp for
        for (i = 0; i < nthread; i++) {
                di = MIN(nblk, n-i*nblk);
                if (di > 0) {
                        dgemm_(&trans_a, &trans_b, &m, &di, &k,
                               &alpha, a, &lda, b+stride*i*nblk, &ldb,
                               &beta, c+ldc*i*nblk, &ldc);
                }
        }
}
        }
}
|
droplet_impact_coupled.c | /* droplet_impact_coupled.c
A 2D droplet falling towards an elastic membrane lying
along the boundary at y = 0. The solution for the membrane is given as a
function of pressure by the routines defined in wave-equation.h, and its
velocity is fed back into Basilisk by altering the boundary condition.
Runs until the turnover point approximately reaches the initial droplet
radius.
Author: Michael Negus
*/
#define FILTERED
#define mu(f) (1./(clamp(f,0,1)*(1./mu1 - 1./mu2) + 1./mu2))
#include "parameters.h" // Includes all defined parameters
#include "axi.h" // Axisymmetric coordinates
#include "navier-stokes/centered.h" // To solve the Navier-Stokes
#include "two-phase.h" // Implements two-phase flow
#include "view.h" // Creating movies using bview
#include "tension.h" // Surface tension of droplet
#include "tag.h" // For removing small droplets
#include "heights.h"
#include "contact.h" // For imposing contact angle on the surface
#include <omp.h> // For openMP parallel
#include <stdlib.h>
/* Computational constants derived from parameters */
double MIN_CELL_SIZE; // Size of the smallest cell
double DROP_REFINED_WIDTH; // Width of the refined area around the droplet
double MEMBRANE_REFINE_NO; // Number of grid cells above the membrane to refine
double MEMBRANE_REFINED_HEIGHT; // Width of the refined area above the membrane
double DROP_CENTRE; // Initial centre position of the droplet
double IMPACT_TIME; // Theoretical time of impact

/* Global variables */
double start_wall_time; // Time the simulation was started
double end_wall_time; // Time the simulation finished
int gfs_output_no = 0; // Records how many GFS files have been outputted
int log_output_no = 0; // Records how many plate data files there have been
int interface_output_no = 0; // Records how many interface files there have been
int membrane_output_no = 0; // Records how many membrane outputs there have been
int start_membrane = 0; // Boolean to indicate if membrane motion has started
double drop_thresh = 1e-4; // Remove droplets threshold
double pinch_off_time = 0.; // Time of pinch-off

/* Contact angle variables */
vector h[]; // Height function field used by contact.h
double theta0 = 90; // Contact angle in degrees

/* Boundary conditions */
// In this setup the droplet is centred on the x axis (see init below), so
// the left boundary (x = 0) is the impacted surface.
// Conditions on surface
uf.n[left] = dirichlet(0.); // No flow through surface
uf.t[left] = dirichlet(0.); // No slip at surface
h.t[left] = contact_angle (theta0*pi/180.); // RC contact angle

// Conditions for entry from above
u.n[right] = neumann(0.); // Free flow condition
p[right] = dirichlet(0.); // 0 pressure far from surface

// Conditions far from the droplet in the radial direction
u.n[top] = neumann(0.); // Allows outflow through boundary
u.t[top] = dirichlet(0.); // Stationary vertical flow
p[top] = dirichlet(0.); // 0 pressure far from surface
int main() {
/* Main function for running the simulation: sets the grid, the fluid
   properties (rho1/rho2/mu1/mu2/f.sigma are two-phase.h / tension.h
   globals), derived geometric constants and the Poisson solver settings,
   then hands control to the Basilisk event loop via run(). */

    /* Create the computational domain */
    init_grid(1 << MINLEVEL); // Create grid according to the minimum level
    size(BOX_WIDTH); // Size of the domain

    /* Set physical constants (non-dimensionalised on the water phase) */
    rho1 = 1.; // Density of water phase
    rho2 = RHO_R; // Density of air phase
    mu1 = 1. / REYNOLDS; // Viscosity of water phase
    mu2 = mu1 * MU_R; // Viscosity of air phase
    f.height = h; // For contact angle calculation
    f.sigma = 1. / WEBER; // Surface tension at interface

    /* Derived constants */
    MIN_CELL_SIZE = BOX_WIDTH / pow(2, MAXLEVEL); // Size of the smallest cell
    DROP_REFINED_WIDTH = 0.05; // Refined region around droplet
    DROP_CENTRE = INITIAL_DROP_HEIGHT + DROP_RADIUS; // Initial centre of drop
    IMPACT_TIME = INITIAL_DROP_HEIGHT / (-DROP_VEL); // Theoretical impact time
    MEMBRANE_REFINE_NO = 8; // Number of cells above membrane to refine by
    MEMBRANE_REFINED_HEIGHT = MEMBRANE_REFINE_NO * MIN_CELL_SIZE;

    /* Creates (truncates) the log file; later events append to it */
    FILE *logfile = fopen("log", "w");
    fclose(logfile);

    /* Poisson solver constants */
    DT = 1.0e-4; // Minimum timestep
    NITERMIN = 1; // Min number of iterations (default 1)
    NITERMAX = 300; // Max number of iterations (default 100)
    TOLERANCE = 1e-6; // Possion solver tolerance (default 1e-3)

    /* Runs the simulation */
    run();
}
event init(t = 0) {
/* Initialises the flow as a spherical droplet falling downwards along the
   x axis: refines a shell around the initial interface, fills the volume
   fraction, and gives the liquid phase the initial velocity DROP_VEL. */

    // Records the wall time
    start_wall_time = omp_get_wtime();

    /* Refines around the droplet: a shell of width 2*DROP_REFINED_WIDTH
       centred on the initial interface radius */
    refine((sq(x - DROP_CENTRE) + sq(y) < sq(DROP_RADIUS + DROP_REFINED_WIDTH)) \
        && (sq(x - DROP_CENTRE) + sq(y) > sq(DROP_RADIUS - DROP_REFINED_WIDTH)) \
        && (level < MAXLEVEL));

    /* Initialises the droplet volume fraction (f = 1 inside the sphere) */
    fraction(f, -sq(x - DROP_CENTRE) - sq(y) + sq(DROP_RADIUS));

    /* Initialise the droplet velocity downwards; multiplying by f leaves
       the gas phase at rest */
    foreach() {
        u.x[] = DROP_VEL * f[];
    }
    boundary ((scalar *){u});
}
event refinement (i++) {
/* Adaptive grid refinement, run every timestep */

    // Adapts with respect to velocities and volume fraction
    adapt_wavelet ({u.x, u.y, f}, (double[]){1e-3, 1e-3, 1e-6},
        minlevel = MINLEVEL, maxlevel = MAXLEVEL);

    /* Attempts to refine above the membrane, doubling the refine height until
    successful */
    double refine_height = MEMBRANE_REFINED_HEIGHT;
    int adequate_refinement = 0;

    while (adequate_refinement == 0) {
        // Attempts to refine (y = radial position, x = height above surface)
        refine((y < MEMBRANE_RADIUS) && (x <= refine_height) \
            && level < MAXLEVEL);

        // Loops over the surface boundary and checks if refinement succeeded
        adequate_refinement = 1;
        foreach_boundary(left) {
            if ((y < MEMBRANE_RADIUS) && (level < MAXLEVEL)) {
                adequate_refinement = 0;
                break;
            }
        }

        // If refinement was unsuccessful, then double the refined height
        // (adapt_wavelet above may have coarsened cells refine() cannot
        // directly re-split without a taller refinement region)
        if (adequate_refinement == 0) refine_height = 2 * refine_height;
    }
}
event gravity (i++) {
/* Adds acceleration due to gravity in the vertical direction */
    // NOTE(review): in this file the fall direction is x (the droplet is
    // centred at x = DROP_CENTRE and initialised with u.x = DROP_VEL), yet
    // this loop iterates over x-faces while decrementing av.y, the
    // y-component of the acceleration. Gravity along the fall direction
    // would normally be av.x[] inside foreach_face(x) — confirm intent.
    face vector av = a; // Acceleration at each face
    foreach_face(x) av.y[] -= 1./sq(FR); // Adds acceleration due to gravity
}
event small_droplet_removal (t += 1e-4) {
/* Removes any small droplets or bubbles that have formed, that are smaller than
a specific size */
    // Removal radius in cells: capped at 16, otherwise scaled so droplets
    // smaller than ~0.2 length units are removed. (The previous comment
    // claiming "diameter 5 cells or less" did not match this expression.)
    int remove_droplet_radius = min(16, (int)(0.2 / MIN_CELL_SIZE));
    remove_droplets(f, remove_droplet_radius);

    // Also remove air bubbles (same helper; the final flag presumably
    // selects the complementary phase — confirm against tag.h usage)
    remove_droplets(f, remove_droplet_radius, 1e-4, true);
}
event output_data (t += LOG_OUTPUT_TIMESTEP) {
/* Outputs data about the flow */

    // Droplet volume from the axisymmetric integral of the volume fraction,
    // computed once and written to both stdout and the log file.
    const double volume = 2 * pi * statsf(f).sum;

    printf("t = %.5f, v = %.8f\n", t, volume);

    FILE *logfile = fopen("log", "a");
    fprintf(logfile, "t = %.5f, v = %.8f\n", t, volume);
    fclose(logfile);

    log_output_no++;
}
event output_interface (t += PLATE_OUTPUT_TIMESTEP) {
/* Outputs the interface locations of the droplet */

    // Creates text file to save output to. snprintf (rather than sprintf)
    // cannot overflow the fixed-size filename buffer however large the
    // output counter grows.
    char interface_filename[80];
    snprintf(interface_filename, sizeof(interface_filename),
        "interface_%d.txt", interface_output_no);
    FILE *interface_file = fopen(interface_filename, "w");

    // Outputs the interface locations and closes the file
    output_facets(f, interface_file);
    fclose(interface_file);

    interface_output_no++;
}
event gfs_output (t += GFS_OUTPUT_TIMESTEP) {
/* Saves a gfs file for post-processing / restart inspection */
    // snprintf (rather than sprintf) guards the fixed-size name buffer.
    char gfs_filename[80];
    snprintf(gfs_filename, sizeof(gfs_filename),
        "gfs_output_%d.gfs", gfs_output_no);
    output_gfs(file = gfs_filename);

    gfs_output_no++;
}
event movies (t += 5e-3) {
/* Produces movies using bview: volume fraction, both velocity components,
   pressure, and a zoomed view near the entrapped bubble. Only active when
   the MOVIES flag (from parameters.h) is set. */
    if (MOVIES) {
        // Creates a string with the time to put on the plots
        char time_str[80];
        sprintf(time_str, "t = %g\n", t);

        /* Zoomed out view */
        // Set up bview box (rotated so the surface appears at the bottom)
        view (width = 1024, height = 1024, fov = 20.0, ty = -0.5, tx = 0.5, quat = {0, 0, -0.707, 0.707});

        /* Movie of the volume fraction of the droplet */
        clear();
        draw_vof("f", lw = 2);
        squares("f", linear = true, spread = -1, map = cool_warm); // RC - minor changes here and beyond
        draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2);
        save ("tracer.mp4");

        /* Movie of the horizontal velocity */
        clear();
        draw_vof("f", lw = 2);
        squares("u.x", spread = -1, linear = true, map = cool_warm);
        draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2);
        save ("horizontal_vel.mp4");

        /* Movie of the vertical velocity */
        clear();
        draw_vof("f", lw = 2);
        squares("u.y", min = -1.5, max = 1.5, linear = true, spread = -1, map = cool_warm);
        draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2);
        save ("vertical_vel.mp4");

        /* Movie of the pressure */
        clear();
        draw_vof("f", lw = 2);
        squares("p", spread = -1, linear = true, map = cool_warm);
        draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2);
        save ("pressure.mp4");

        /* Zoomed in view of pressure around entrapped bubble */
        // Set up bview box
        view (width = 1024, height = 1024, fov = 2.0, ty = -0.05, tx = -0.05);

        clear();
        draw_vof("f", lw = 2);
        squares("u.y", min = -1.5, max = 1.5, linear = true, spread = -1, map = cool_warm);
        draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2);
        save ("zoomed_vertical_vel.mp4");
    }
}
event end (t = MAX_TIME) {
/* Ends the simulation, reporting the elapsed wall time to stderr and the
   log file */

    end_wall_time = omp_get_wtime(); // Records the time of finish
    const double elapsed = end_wall_time - start_wall_time;

    fprintf(stderr, "Finished after %g seconds\n", elapsed);

    FILE *logfile = fopen("log", "a");
    fprintf(logfile, "Finished after %g seconds\n", elapsed);
    fclose(logfile);
}
// void output_arrays(double *w_arr, double *w_deriv_arr, double *p_arr) {
// /* output_membrane
// Outputs the x positions of the membrane into a text file
// */
// char w_filename[40];
// sprintf(w_filename, "w_%d.txt", membrane_output_no);
// FILE *w_file = fopen(w_filename, "w");
// char w_deriv_filename[40];
// sprintf(w_deriv_filename, "w_deriv_%d.txt", membrane_output_no);
// FILE *w_deriv_file = fopen(w_deriv_filename, "w");
// char p_filename[40];
// sprintf(p_filename, "p_%d.txt", membrane_output_no);
// FILE *p_file = fopen(p_filename, "w");
// // Outputs from x = 0 to L - dx
// #pragma omp parallel for
// for (int k = 0; k < M; k++) {
// double x = k * DELTA_X;
// // fprintf(w_file, "%.10f, %.10f\n", x, w_arr[k]);
// // fprintf(w_deriv_file, "%.10f, %.10f\n", x, w_deriv_arr[k]);
// // fprintf(p_file, "%.10f, %.10f\n", x, p_arr[k]);
// fprintf(w_file, "%g, %g\n", x, w_arr[k]);
// fprintf(w_deriv_file, "%g, %g\n", x, w_deriv_arr[k]);
// fprintf(p_file, "%g, %g\n", x, p_arr[k]);
// }
// // Outputs x = L, where w and w_deriv = 0
// double x = M * DELTA_X;
// fprintf(w_file, "%.10f, %.10f\n", x, 0.0);
// fprintf(p_file, "%.10f, %.10f\n", x, 0.0);
// fprintf(w_deriv_file, "%.10f, %.10f", x, 0.0);
// fclose(w_file);
// fclose(p_file);
// fclose(w_deriv_file);
// membrane_output_no++;
// }
// void output_arrays_stationary(double *p_arr) {
// /* output_membrane_stationary
// Outputs the x positions of the pressure in a text file
// */
// char p_filename[40];
// sprintf(p_filename, "p_%d.txt", membrane_output_no);
// FILE *p_file = fopen(p_filename, "w");
// // Outputs from x = 0 to L - dx
// #pragma omp parallel for
// for (int k = 0; k < M; k++) {
// double x = k * DELTA_X;
// fprintf(p_file, "%g, %g\n", x, p_arr[k]);
// }
// // Outputs x = L, where w and w_deriv = 0
// double x = M * DELTA_X;
// fprintf(p_file, "%.10f, %.10f\n", x, 0.0);
// fclose(p_file);
// membrane_output_no++;
// }
|
target_x86.c | /*****************************************************************************
*
* target_x86.c
*
* Implementation is serial or OpenMP.
*
* Edinburgh Soft Matter and Statistical Physics Group and
* Edinburgh Parallel Computing Centre
*
* (c) 2018 The University of Edinburgh
*
* Contributing authors:
* Alan Gray (Late of this parish)
* Kevin Stratford (kevin@epcc.ed.ac.uk)
*
*****************************************************************************/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "target.h"
/* Globally reserved names. */
/* Host-side stand-ins for the CUDA built-in index/dimension variables. */

dim3 threadIdx;
dim3 blockIdx;
dim3 gridDim = {1, 1, 1};
dim3 blockDim = {1, 1, 1};

/* Sticky last-error state mimicking the CUDA runtime error model. */
static tdpError_t lastError = tdpSuccess;
static char lastErrorString[BUFSIZ] = "";
static int staticStream; /* placeholder backing storage for the one stream */
/* Utilities */

/* Report a fatal tdp error (file, line, symbolic name) to stderr and
 * terminate. Exits with a failure status so calling scripts can detect the
 * error — the previous exit(0) reported success on a fatal error path. */
static void error_boke(int line, tdpError_t error) {

  fprintf(stderr, "File %s line %d error %s\n", __FILE__, line,
	  tdpGetErrorName(error));
  exit(EXIT_FAILURE);
}
/* Convenience wrapper that stamps the current line into error_boke(). */
#define errors_make_me_boke(error) error_boke(__LINE__, error)

/* Record `error` as the last error, report it fatally, and return it from
 * the enclosing function when `expr` is true. (No comments inside the macro
 * bodies: each line must end in the `\` continuation.) */
#define error_return_if(expr, error) \
  do { if ((expr)) {		     \
      lastError = error;	     \
      errors_make_me_boke(error);    \
      return error;		     \
    }				     \
  } while(0)

/* Unconditional variant of error_return_if. */
#define error_return(error) \
  error_return_if(1, error)
/* Print a diagnostic for a failed tdp call; exit when `fatal` is set.
 * A success code is a no-op. */
void tdpErrorHandler(tdpError_t ifail, const char * file, int line, int fatal) {

  if (ifail == tdpSuccess) return;

  printf("tdpErrorHandler: %s:%d %s %s\n", file, line, tdpGetErrorName(ifail),
	 tdpGetErrorString(ifail));
  if (fatal) exit(ifail);

  return;
}
/*****************************************************************************
 *
 *  tdpThreadModelInfo
 *
 *  Provide some information on the thread model.
 *
 *****************************************************************************/

__host__ tdpError_t tdpThreadModelInfo(FILE * fp) {

  assert(fp);

#ifndef _OPENMP
  fprintf(fp, "Target thread model: None.\n");
#else
  /* NOTE(review): the second printed field is labelled "maximum number of
   * threads" but is sourced from omp_get_num_procs() (processor count),
   * while the first field uses omp_get_max_threads(). The labels look
   * swapped — confirm before changing the output text. */
  fprintf(fp, "Target thread model: OpenMP.\n");
  fprintf(fp, "OpenMP threads: %d; maximum number of threads: %d.\n",
	  omp_get_max_threads(), omp_get_num_procs());
#endif

  return tdpSuccess;
}
/*****************************************************************************
 *
 *  tdp_x86_prelaunch
 *
 *  Injected immediately before "kernel launch": copies the launch
 *  configuration into the CUDA-style globals and sizes the OpenMP team.
 *
 *****************************************************************************/

__host__ void tdp_x86_prelaunch(dim3 nblocks, dim3 nthreads) {

  gridDim = nblocks;
  blockDim = nthreads;

  /* sanity checks on user settings here... */
  gridDim.x = 1;  /* Assert this for host implementation */

  /* In case we request fewer threads than are available: */

  omp_set_num_threads(blockDim.x*blockDim.y*blockDim.z);

  /* Check blockDim, blockIdx ? */
  /* NOTE(review): omp_get_thread_num() outside a parallel region always
   * returns 0, and CUDA thread indices are 0-based, so threadIdx.y = 1 /
   * threadIdx.z = 1 looks suspect (0 expected) — confirm how kernels built
   * against this shim consume threadIdx. */
  threadIdx.x = omp_get_thread_num();
  threadIdx.y = 1;
  threadIdx.z = 1;

  return;
}

void tdp_x86_postlaunch(void) {

  /* Reset the default number of threads for subsequent parallel regions. */
  omp_set_num_threads(omp_get_max_threads());

  return;
}
/*****************************************************************************
 *
 *  tdpDeviceGetCacheConfig
 *
 *  Host stub: there is no cache preference, so report "none".
 *
 *****************************************************************************/

tdpError_t tdpDeviceGetCacheConfig(tdpFuncCache * cacheConfig) {

  *cacheConfig = tdpFuncCachePreferNone;

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpDeviceSetCacheConfig
 *
 *****************************************************************************/

tdpError_t tdpDeviceSetCacheConfig(tdpFuncCache cacheConfig) {

  /* No op. */

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpDeviceSynchronize
 *
 *  Host execution is synchronous, so there is never anything to wait for.
 *
 *****************************************************************************/

tdpError_t tdpDeviceSynchronize(void) {

  /* do nothing */
  lastError = tdpSuccess;

  return tdpSuccess;
}
/*****************************************************************************
 *
 *  tdpFree
 *
 *  Release "device" memory obtained from tdpMalloc (plain malloc here).
 *
 *****************************************************************************/

tdpError_t tdpFree(void * devPtr) {

  /* NOTE(review): cudaFree(NULL) is a success no-op in CUDA, whereas this
   * treats NULL as a fatal invalid-pointer error — confirm the stricter
   * behaviour is intended for the host shim. */
  error_return_if(devPtr == NULL, tdpErrorInvalidDevicePointer);

  free(devPtr);

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpFreeHost
 *
 *  Release host memory obtained from tdpHostAlloc. free(NULL) is safe.
 *
 *****************************************************************************/

tdpError_t tdpFreeHost(void * ptr) {

  free(ptr);

  return tdpSuccess;
}
/*****************************************************************************
 *
 *  tdpDeviceGetAttribute
 *
 *  Not implemented for the host target: always trips the assert.
 *
 *****************************************************************************/

tdpError_t tdpDeviceGetAttribute(int * value, tdpDeviceAttr attr, int device) {

  assert(value);
  assert(0); /* Return some useful information please */

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  Return id of device currently being used.
 *
 *  The host target always reports device 0.
 *
 *****************************************************************************/

tdpError_t tdpGetDevice(int * device) {

  assert(device);

  *device = 0;

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpGetDeviceCount
 *
 *  Return number of available devices: 0 (or 1 with FAKE_DEVICE), and
 *  always the "no device" status for the host build.
 *
 *****************************************************************************/

tdpError_t tdpGetDeviceCount(int * device) {

  *device = 0;

#ifdef FAKE_DEVICE /* "Fake" device */
  *device = 1;
#endif

  /* Strictly, we should return tdpErrorInsufficientDriver or ... */

  return tdpErrorNoDevice;
}
/*****************************************************************************
 *
 *  tdpGetDeviceProperties
 *
 *  Fill in the host limits. NOTE(review): only the thread-count fields of
 *  *prop are written; any other members are left untouched for the caller.
 *
 *****************************************************************************/

tdpError_t tdpGetDeviceProperties(struct tdpDeviceProp * prop, int device) {

  prop->maxThreadsPerBlock = TARGET_MAX_THREADS_PER_BLOCK;
  prop->maxThreadsDim[0] = TARGET_MAX_THREADS_PER_BLOCK;
  prop->maxThreadsDim[1] = 1;
  prop->maxThreadsDim[2] = 1;

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpSetDevice
 *
 *  Only rejects negative ids; any non-negative id is accepted since the
 *  host target has a single implicit device.
 *
 *****************************************************************************/

tdpError_t tdpSetDevice(int device) {

  error_return_if(device < 0, tdpErrorInvalidDevice);

  return tdpSuccess;
}
/*****************************************************************************
 *
 *  tdpGetErrorName
 *
 *  Map an error code to its symbolic name via macro stringification.
 *
 *****************************************************************************/

#define CASE_RETURN(x) case(x): return #x; break

const char * tdpGetErrorName(tdpError_t error) {

  switch (error) {
    CASE_RETURN(tdpSuccess);
    CASE_RETURN(tdpErrorMissingConfiguration);
    CASE_RETURN(tdpErrorMemoryAllocation);
    CASE_RETURN(tdpErrorInitializationError);
    CASE_RETURN(tdpErrorLaunchFailure);
    CASE_RETURN(tdpErrorLaunchTimeout);
    CASE_RETURN(tdpErrorLaunchOutOfResources);
    CASE_RETURN(tdpErrorInvalidDeviceFunction);
    CASE_RETURN(tdpErrorInvalidSymbol);
    CASE_RETURN(tdpErrorInvalidDevicePointer);
    CASE_RETURN(tdpErrorInvalidResourceHandle);
  default:
    /* Fall through to the generic string after logging the raw code. */
    fprintf(stderr, "Unrecognised error code was %d\n", error);
  }

  return "Unrecognised error code";
}
/*****************************************************************************
*
* tdpGetErrorString
*
*****************************************************************************/
/* Return a human-readable message for ifail. This stub always returns
 * an empty string; see tdpGetErrorName() for the symbolic name. */
const char * tdpGetErrorString(tdpError_t ifail) {
return "";
}
/*****************************************************************************
*
* tdpPeekAtLastError
*
*****************************************************************************/
/* Return the last recorded error without clearing it. */
tdpError_t tdpPeekAtLastError(void) {
return lastError;
}
/*****************************************************************************
*
* tdpGetLastError
*
*****************************************************************************/
/* Return the last recorded error and reset the error state (code and
 * stored message) to success. */
tdpError_t tdpGetLastError(void) {
tdpError_t last = lastError;
lastError = tdpSuccess;
strcpy(lastErrorString, "");
return last;
}
/*****************************************************************************
*
* tdpGetSymbolAddress
*
*****************************************************************************/
/* On the host the "device" address of a symbol is the symbol itself.
 * NOTE(review): assert(symbol) fires before the error_return_if in debug
 * builds, making that check reachable only with NDEBUG. The cast drops
 * const because the API returns a mutable void**. */
tdpError_t tdpGetSymbolAddress(void ** devptr, const void * symbol) {
assert(devptr);
assert(symbol);
error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
*devptr = (void *) symbol;
return tdpSuccess;
}
/*****************************************************************************
*
* tdpHostAlloc
*
*****************************************************************************/
/* Allocate "page-locked" host memory. All supported flag variants map
 * to a plain malloc() in this host implementation; any other flags
 * combination is rejected. */
tdpError_t tdpHostAlloc(void ** phost, size_t size, unsigned int flags) {
void * ptr = NULL;
error_return_if(phost == NULL, tdpErrorInvalidValue);
switch (flags) {
case tdpHostAllocDefault:
case tdpHostAllocPortable:
case tdpHostAllocMapped:
case tdpHostAllocWriteCombined:
ptr = malloc(size);
error_return_if(ptr == NULL, tdpErrorMemoryAllocation);
*phost = ptr;
break;
default:
error_return(tdpErrorInvalidValue);
}
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMalloc
*
*****************************************************************************/
/* "Device" allocation; just malloc() on the host. Unlike
 * tdpMallocManaged() a zero size is not rejected here. */
tdpError_t tdpMalloc(void ** devPtr, size_t size) {
assert(devPtr);
*devPtr = malloc(size);
error_return_if(*devPtr == NULL, tdpErrorMemoryAllocation);
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMallocManaged
*
*****************************************************************************/
/* Managed allocation; plain malloc() on the host. Rejects zero sizes
 * and any flag bits outside the two valid attach flags. */
tdpError_t tdpMallocManaged(void ** devptr, size_t size, unsigned int flag) {
void * ptr = NULL;
/* Only these attach flags are meaningful. */
unsigned int valid = (tdpMemAttachGlobal | tdpMemAttachHost);
assert(devptr);
error_return_if(size < 1, tdpErrorInvalidValue);
error_return_if((flag & (~valid)), tdpErrorInvalidValue);
ptr = malloc(size);
error_return_if(ptr == NULL, tdpErrorMemoryAllocation);
*devptr = ptr;
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMemcpy
*
*****************************************************************************/
/* Copy count bytes from src to dst. On the host all four explicit
 * directions reduce to memcpy(); tdpMemcpyDefault is not supported. */
tdpError_t tdpMemcpy(void * dst, const void * src, size_t count,
tdpMemcpyKind kind) {
assert(dst);
assert(src);
error_return_if(count < 1, tdpErrorInvalidValue);
switch (kind) {
case tdpMemcpyHostToDevice:
error_return_if(dst == NULL, tdpErrorInvalidDevicePointer);
memcpy(dst, src, count);
break;
case tdpMemcpyDeviceToHost:
error_return_if(src == NULL, tdpErrorInvalidDevicePointer);
memcpy(dst, src, count);
break;
case tdpMemcpyHostToHost:
memcpy(dst, src, count);
break;
case tdpMemcpyDeviceToDevice:
memcpy(dst, src, count);
break;
case tdpMemcpyDefault:
default:
error_return(tdpErrorInvalidMemcpyDirection);
}
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMemcpyFromSymbol
*
*****************************************************************************/
/* Copy count bytes out of a "device" symbol. Only zero offsets are
 * supported. Host-source directions make no sense for a symbol copy:
 * they abort in debug builds and otherwise fall through to the
 * invalid-direction error. */
tdpError_t tdpMemcpyFromSymbol(void * dst, const void * symbol,
size_t count, size_t offset,
tdpMemcpyKind kind) {
assert(dst);
assert(symbol);
error_return_if(count < 1, tdpErrorInvalidValue);
error_return_if(offset != 0, tdpErrorInvalidValue);
switch (kind) {
case tdpMemcpyDefault:
case tdpMemcpyDeviceToHost:
error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
memcpy(dst, symbol, count);
break;
case tdpMemcpyDeviceToDevice:
error_return_if(dst == NULL, tdpErrorInvalidDevicePointer);
error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
memcpy(dst, symbol, count);
break;
case tdpMemcpyHostToDevice:
/* not supported; fall through */
assert(0);
case tdpMemcpyHostToHost:
/* not supported; fall through */
assert(0);
default:
error_return(tdpErrorInvalidMemcpyDirection);
}
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMemcpyToSymbol
*
* Copy count bytes into a "device" symbol (zero offset only).
*
* CUDA wants "const void * symbol", but this is avoided as we need
* a memset(void * dst, const void * src, ...) .
*
*****************************************************************************/
tdpError_t tdpMemcpyToSymbol(void * symbol, const void * src,
size_t count, size_t offset,
tdpMemcpyKind kind) {
assert(symbol);
assert(src);
error_return_if(count < 1, tdpErrorInvalidValue);
error_return_if(offset != 0, tdpErrorInvalidValue);
switch (kind) {
case tdpMemcpyDefault:
case tdpMemcpyHostToDevice:
error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
memcpy(symbol, src, count);
break;
case tdpMemcpyDeviceToDevice:
error_return_if(src == NULL, tdpErrorInvalidDevicePointer);
memcpy(symbol, src, count);
break;
/* Device-to-host and host-to-host are invalid for a symbol target. */
case tdpMemcpyDeviceToHost:
case tdpMemcpyHostToHost:
default:
error_return(tdpErrorInvalidMemcpyDirection);
}
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMemset
*
*****************************************************************************/
/* Set count bytes starting at devPtr to value. The value must be
 * representable as an unsigned char, i.e. lie in [0, 255]. */
tdpError_t tdpMemset(void * devPtr, int value, size_t count) {
error_return_if(devPtr == NULL, tdpErrorInvalidDevicePointer);
error_return_if(value < 0 || value > 255, tdpErrorInvalidValue);
memset(devPtr, value, count);
return tdpSuccess;
}
/*****************************************************************************
*
* tdpStreamCreate
*
*****************************************************************************/
/* "Create" a stream: every handle refers to the one static stream. */
tdpError_t tdpStreamCreate(tdpStream_t * stream) {
error_return_if(stream == NULL, tdpErrorInvalidValue);
*stream = &staticStream;
return tdpSuccess;
}
/*****************************************************************************
*
* tdpStreamDestroy
*
*****************************************************************************/
/* Validate the handle; nothing to release for the static stream. */
tdpError_t tdpStreamDestroy(tdpStream_t stream) {
error_return_if(stream != &staticStream, tdpErrorInvalidResourceHandle);
return tdpSuccess;
}
/*****************************************************************************
*
* tdpStreamSynchronize
*
*****************************************************************************/
/* All work is synchronous on the host, so only the handle is checked. */
tdpError_t tdpStreamSynchronize(tdpStream_t stream) {
error_return_if(stream != &staticStream, tdpErrorInvalidResourceHandle);
/* Success */
return tdpSuccess;
}
/*****************************************************************************
*
* tdpMemcpyAsync
*
*****************************************************************************/
/* "Asynchronous" copy: synchronous on the host, so delegate to
 * tdpMemcpy() and ignore the stream. */
tdpError_t tdpMemcpyAsync(void * dst, const void * src, size_t count,
tdpMemcpyKind kind, tdpStream_t stream) {
/* Just ignore the stream argument and copy immediately */
return tdpMemcpy(dst, src, count, kind);
}
/* Return the larger of two ints. */
static int int_max(int a, int b) {
  if (a < b) return b;
  return a;
}
/* Return the smaller of two ints. */
static int int_min(int a, int b) {
  if (a > b) return b;
  return a;
}
/*****************************************************************************
*
* tdpAtomicAddInt
*
* Emulated atomic add: returns the old value of *sum. With OpenMP the
* update is guarded by a named critical section; otherwise plain
* (single-threaded) code.
*
*****************************************************************************/
__device__ int tdpAtomicAddInt(int * sum, int val) {
int old;
assert(sum);
#ifdef _OPENMP
#pragma omp critical(atomicAddInt)
{
old = *sum;
*sum += val;
}
#else
old = *sum;
*sum += val;
#endif
return old;
}
/*****************************************************************************
*
* tdpAtomicMaxInt
*
* Emulated atomic max: stores max(*maxval, val) and returns the old
* value. maxval expected to be __shared__.
*
*****************************************************************************/
__device__ int tdpAtomicMaxInt(int * maxval, int val) {
int old;
assert(maxval);
#ifdef _OPENMP
/* Ug. */
#pragma omp critical (atomicMaxInt)
{
old = *maxval;
*maxval = int_max(*maxval, val);
}
#else
old = *maxval;
*maxval = int_max(*maxval, val);
#endif
return old;
}
/*****************************************************************************
*
* tdpAtomicMinInt
*
* Emulated atomic min: stores min(*minval, val) and returns the old
* value.
*
*****************************************************************************/
__device__ int tdpAtomicMinInt(int * minval, int val) {
int old;
assert(minval);
#ifdef _OPENMP
#pragma omp critical (tdpAtomicMinInt)
{
old = *minval;
*minval = int_min(*minval, val);
}
#else
old = *minval;
*minval = int_min(*minval, val);
#endif
return old;
}
/*****************************************************************************
*
* tdpAtomicAddDouble
*
* Emulated atomic add for doubles: returns the old value of *sum.
*
*****************************************************************************/
__device__ double tdpAtomicAddDouble(double * sum, double val) {
double old;
assert(sum);
#ifdef _OPENMP
/* Could use "omp capture" here, but not entirely portable without warning */
#pragma omp critical(tdpAtomicAddDouble)
{
old = *sum;
*sum += val;
}
#else
old = *sum;
*sum += val;
#endif
return old;
}
/* Return the larger of two doubles. */
static double double_max(double a, double b) {
  if (a < b) return b;
  return a;
}
/* Return the smaller of two doubles. */
static double double_min(double a, double b) {
  if (a > b) return b;
  return a;
}
/*****************************************************************************
*
* tdpAtomicMaxDouble
*
* Emulated atomic max for doubles: stores max(*maxval, val) and
* returns the old value.
*
*****************************************************************************/
__device__ double tdpAtomicMaxDouble(double * maxval, double val) {
double old;
assert(maxval);
#ifdef _OPENMP
#pragma omp critical (atomicMaxDouble)
{
old = *maxval;
*maxval = double_max(*maxval, val);
}
#else
old = *maxval;
*maxval = double_max(*maxval, val);
#endif
return old;
}
/*****************************************************************************
*
* tdpAtomicMinDouble
*
* Emulated atomic min for doubles: stores min(*minval, val) and
* returns the old value.
*
*****************************************************************************/
__device__ double tdpAtomicMinDouble(double * minval, double val) {
double old;
assert(minval);
#ifdef _OPENMP
#pragma omp critical (atomicMinDouble)
{
old = *minval;
*minval = double_min(*minval, val);
}
#else
old = *minval;
*minval = double_min(*minval, val);
#endif
return old;
}
/*****************************************************************************
*
* tdpAtomicBlockAddInt
*
* See, e.g.,
* https://devblogs.nvidia.com/parallelforall/
* faster-parallel-reductions-kepler/
*
* The partial sums partsum must be __shared__; they are destroyed
* on exit.
* The result is only significant at thread zero.
*
*****************************************************************************/
__device__ int tdpAtomicBlockAddInt(int * partsum) {
#ifdef _OPENMP
int istr;
int nblock;
int nthread = omp_get_num_threads();
int idx = omp_get_thread_num();
/* nblock = smallest power of two >= nthread (computed via FP log2) */
nblock = pow(2, ceil(log(1.0*nthread)/log(2)));
/* Pairwise tree reduction; every thread runs the same iterations, so
 * the barriers stay aligned. */
for (istr = nblock/2; istr > 0; istr /= 2) {
#pragma omp barrier
if (idx < istr && idx + istr < nthread) {
partsum[idx] += partsum[idx + istr];
}
}
#endif
return partsum[0];
}
/*****************************************************************************
*
* tdpAtomicBlockAddDouble
*
* Double version of tdpAtomicBlockAddInt(): partsum must be shared by
* the thread team, is destroyed on exit, and the result is only
* significant at thread zero.
*
*****************************************************************************/
__device__ double tdpAtomicBlockAddDouble(double * partsum) {
#ifdef _OPENMP
int istr;
int nblock;
int nthread = omp_get_num_threads();
int idx = omp_get_thread_num();
/* nblock = smallest power of two >= nthread */
nblock = pow(2, ceil(log(1.0*nthread)/log(2)));
/* Pairwise tree reduction with aligned barriers. */
for (istr = nblock/2; istr > 0; istr /= 2) {
#pragma omp barrier
if (idx < istr && idx + istr < nthread) {
partsum[idx] += partsum[idx + istr];
}
}
#endif
return partsum[0];
}
|
taskloop_nogroup_tied_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
/* Verify that "taskloop ... nogroup" lets the encountering (master)
 * thread continue past the loop without waiting for the generated
 * tasks: the barrier only completes if tasks and non-master threads
 * rendezvous while master has already moved on. Returns 1 on success,
 * 0 on failure. */
int test_taskloop_nogroup_tied_scheduling() {
int i, vals[6];
memset(vals, 0, sizeof(int) * 6);
timeout_barrier_t barrier;
timeout_barrier_init(&barrier);
#pragma omp parallel num_threads(4)
{
// 6 barrier_waits in tasks and 2 barrier_waits in threads
#pragma omp master
{
check_num_ess(4);
/* grainsize(1): one task per iteration; nogroup: no implicit
 * taskgroup, so master does not wait here. */
#pragma omp taskloop grainsize(1) nogroup
for (i = 0; i < 6; i++) {
timeout_barrier_wait(&barrier, 4);
vals[i] = 1;
}
}
if (omp_get_thread_num() < 2) {
// master does not wait the completion of taskloop.
timeout_barrier_wait(&barrier, 4);
}
}
/* Every iteration must have executed exactly once. */
for (i = 0; i < 6; i++) {
if (vals[i] != 1) {
printf("vals[%d] == %d\n", i, vals[i]);
return 0;
}
}
return 1;
}
/* Run the test REPETITIONS times; the exit status is the number of
 * failed repetitions (0 == all passed). */
int main() {
int rep;
int failures = 0;
for (rep = 0; rep < REPETITIONS; rep++) {
failures += test_taskloop_nogroup_tied_scheduling() ? 0 : 1;
}
return failures;
}
|
findmax.c | #include<stdio.h>
#include<stdlib.h>
#include "generic.h"
#define size 10000
#define NT 8
int arr[size];
int flag[size];//to set flag[i]==1 if arr[i] is maximum
/* Parallel maximum search: flag[i] stays 1 only while arr[i] is a
 * maximum of the whole array. Usage: ./a.out <integer-seed-value> */
int main(int argc, char *argv[]){
/* The original dereferenced argv[1] without checking argc (UB when the
 * seed argument is missing). */
if (argc < 2) {
fprintf(stderr, "usage: %s <integer-seed-value>\n", argv[0]);
return 1;
}
srand(atoi(argv[1]));/* Seed for random number: command line integer value */
/* Generate random numbers in [0, 2^20). */
for(int i=0;i<size;i++)arr[i]=rand()%1048576;
/* Initialize flag[i]=1 for 0 <= i < size (assume maximum until refuted). */
for(int i=0;i<size;i++) flag[i]=1;
double t1=rtclock();
#pragma omp parallel for num_threads(NT)
for(int i=0;i<size;i++)
for(int j=0;j<size;j++)
/* if arr[i] is not a maximum, clear flag[i] */
if(arr[i]<arr[j])flag[i]=0;
double t2=rtclock();
printf("\nTIME =%f \n",(t2-t1)*1000);
/* Print every occurrence of the maximum value. */
for(int i=0;i<size;i++)if(flag[i]==1)printf("arr[%d]= %d\n",i,arr[i]);
return 0;
}
/*Run executable-path <integer-seed-value>
*example: ./a.out 3 */
|
GB_unop__sin_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fc32_fc32)
// op(A') function: GB (_unop_tran__sin_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csinf (aij)
// type of the entries of A (input matrix)
#define GB_ATYPE \
GxB_FC32_t
// type of the entries of C (output matrix)
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// accessor for the p-th entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csinf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Compute Cx [p] = csinf (Ax [p]) for anz entries, in parallel.
// Ab, when non-NULL, is the bitmap of A: entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__sin_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// non-bitmap case: apply the operator to every entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csinf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose kernel body is shared via GB_unop_transpose.c, which is
// specialised through the GB_* macros defined above.
GrB_Info GB (_unop_tran__sin_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__tan_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tan_fp32_fp32
// op(A') function: GB_unop_tran__tan_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = tanf (aij)
// type of the entries of A (input matrix)
#define GB_ATYPE \
float
// type of the entries of C (output matrix)
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// accessor for the p-th entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tanf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = tanf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TAN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Compute Cx [p] = tanf (Ax [p]) for anz entries, in parallel.
// Ab, when non-NULL, is the bitmap of A: entries with Ab [p] == 0 are skipped.
GrB_Info GB_unop_apply__tan_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op: a straight memcpy suffices (not taken for tan)
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = tanf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = tanf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose kernel body is shared via GB_unop_transpose.c, which is
// specialised through the GB_* macros defined above.
GrB_Info GB_unop_tran__tan_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bs_omp.c |
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <time.h>
#include <stdint.h>
#include "timer.h"
#define DTYPE uint64_t
/*
 * @brief Fill input with a strictly increasing random sequence and draw
 * n_querys queries from it, so every query is guaranteed to be present.
 * (The unused locals max/min from the original have been removed.)
 */
void create_test_file(DTYPE * input, uint64_t nr_elements, DTYPE * querys, uint64_t n_querys) {
srand(time(NULL));
/* Strictly increasing: each element exceeds its predecessor by 1..10. */
input[0] = 1;
for (uint64_t i = 1; i < nr_elements; i++) {
input[i] = input[i - 1] + (rand() % 10) + 1;
}
/* Queries are sampled from the input so each search succeeds.
 * NOTE(review): the original sampled indices from [0, nr_elements - 2);
 * kept as-is to preserve behaviour. */
for(uint64_t i = 0; i < n_querys; i++)
{
querys[i] = input[rand() % (nr_elements - 2)];
}
}
/**
 * @brief Binary-search each query in the sorted input, in parallel.
 * Returns (uint64_t)-1 plus the sum of the matching indices.
 *
 * Fixes vs. the original:
 * - `found` was a shared variable updated by all threads without any
 *   synchronisation (a data race); it is now an OpenMP reduction.
 * - when a query is smaller than input[0], `r = m - 1` with m == 0
 *   underflowed the unsigned index and read out of bounds; the search
 *   now terminates instead.
 */
uint64_t binarySearch(DTYPE * input, uint64_t input_size, DTYPE* querys, unsigned n_querys)
{
uint64_t found = -1;
uint64_t q;
/* reduction(+:found) accumulates each thread's contribution safely. */
#pragma omp parallel for reduction(+:found)
for(q = 0; q < n_querys; q++)
{
uint64_t l = 0;
uint64_t r = input_size;
while (l <= r)
{
uint64_t m = l + (r - l) / 2;
// Check if x is present at mid
if (input[m] == querys[q])
{
found += m;
break;
}
// If x greater, ignore left half
if (input[m] < querys[q])
l = m + 1;
// If x is smaller, ignore right half
else
{
if (m == 0) break; /* avoid unsigned underflow of r */
r = m - 1;
}
}
}
return found;
}
/**
 * @brief Main of the Host Application.
 *
 * Fixes vs. the original: command-line arguments are validated before
 * use, allocations are checked, and `querys` is freed (it leaked).
 */
int main(int argc, char **argv) {
Timer timer;
/* The original dereferenced argv[1] and argv[2] without checking argc. */
if (argc < 3) {
fprintf(stderr, "usage: %s <input_size> <n_querys>\n", argv[0]);
return 1;
}
uint64_t input_size = atol(argv[1]);
uint64_t n_querys = atol(argv[2]);
printf("Vector size: %lu, num searches: %lu\n", input_size, n_querys);
DTYPE * input = malloc((input_size) * sizeof(DTYPE));
DTYPE * querys = malloc((n_querys) * sizeof(DTYPE));
if (input == NULL || querys == NULL) {
fprintf(stderr, "[ERROR] allocation failed\n");
free(input);
free(querys);
return 1;
}
DTYPE result_host = -1;
// Create an input file with arbitrary data.
create_test_file(input, input_size, querys, n_querys);
start(&timer, 0, 0);
result_host = binarySearch(input, input_size - 1, querys, n_querys);
stop(&timer, 0);
/* Non-zero result means the searches accumulated into the sentinel.
 * NOTE(review): the uint64 result is truncated to int, as before. */
int status = (result_host);
if (status) {
printf("[OK] Execution time: ");
print(&timer, 0, 1);
printf("ms.\n");
} else {
printf("[ERROR]\n");
}
free(input);
free(querys); /* was leaked in the original */
return status ? 0 : 1;
}
|
Example_carrays_fpriv.1.c | /*
* @@name: carrays_fpriv.1c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
*/
#include <assert.h>
int A[2][2] = {1, 2, 3, 4};
/* OpenMP spec example: firstprivate semantics for array parameters.
 * B (VLA parameter) and C decay to pointers, so their private copies
 * still alias the original storage; local arrays D and E are copied
 * element-wise into each thread's private copy. */
void f(int n, int B[n][n], int C[])
{
int D[2][2] = {1, 2, 3, 4};
int E[n][n];
assert(n >= 2);
E[1][1] = 4;
#pragma omp parallel firstprivate(B, C, D, E)
{
/* B and C are pointers; D and E are genuine (copied) arrays. */
assert(sizeof(B) == sizeof(int (*)[n]));
assert(sizeof(C) == sizeof(int*));
assert(sizeof(D) == 4 * sizeof(int));
assert(sizeof(E) == n * n * sizeof(int));
/* Private B and C have values of original B and C. */
assert(&B[1][1] == &A[1][1]);
assert(&C[3] == &A[1][1]);
assert(D[1][1] == 4);
assert(E[1][1] == 4);
}
}
/* Pass global A both as a 2x2 matrix and as a flat int array. */
int main() {
f(2, A, A[0]);
return 0;
}
|
TRPO_Lightweight_FPGA.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
#include "lbfgs.h"
#include "Maxfiles.h"
#include "MaxSLiCInterface.h"
double TRPO_Lightweight_FPGA (TRPOparam param, const int NumIter, const size_t NumThreads) {
//////////////////// Read Parameters ////////////////////
// OpenMP Settings
omp_set_num_threads(NumThreads);
// Assign Parameters
const size_t NumLayers = param.NumLayers;
char * AcFunc = param.AcFunc;
size_t * LayerSize = param.LayerSize;
char * ModelFile = param.ModelFile;
char * BaselineFile = param.BaselineFile;
char * ResultFile = param.ResultFile;
const double CG_Damping = param.CG_Damping;
double ResidualTh = 1e-10;
size_t MaxIter = 10;
double MaxKL = 0.01;
double MaxBackTracks = 10;
double AcceptRatio = 0.1;
double gamma = 0.995;
double lam = 0.98;
// Assign Parameters - For FPGA Only
size_t * PaddedLayerSize = param.PaddedLayerSize;
size_t * NumBlocks = param.NumBlocks;
int EvenLayerCompExtraLatency = 2;
// Layer Size of Baseline
size_t LayerSizeBase[4] = {16, 16, 16, 1};
// Dimension of Observation Space and Action Space
const size_t ObservSpaceDim = LayerSize[0];
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// Number of Policy Parameters
size_t NumParams = NumParamsCalc(param.LayerSize, param.NumLayers);
size_t NumParamsBase = NumParamsCalc(LayerSizeBase, NumLayers) - 1;
int PaddedParamsBase = (int)ceil((double)NumParamsBase/16.0)*16;
// iterator when traversing through input vector and result vector
size_t pos;
// Number of Episodes per Batch (NumEpisodes in Def.maxj)
const int NumEpBatch = 21;
// Length of Each Episode - timestep_limit (EpisodeLen in Def.maxj)
const int EpLen = 150;
// Number of Lightweight Simulators
const int NumLightweightSimulators = 7;
// Number of Samples
size_t NumSamples = NumEpBatch * EpLen;
// Length of Each TimeStep (s)
const double TimeStepLen = 0.02;
// Angular Speed = coeff * Activation
double coeff = 1;
// Random Seed
srand(0);
// pi - for Gaussian Random Number generation
const double pi = 3.1415926535897931;
//////////////////// Memory Allocation - Model ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// LogStd[i] is the log of std[i] in the policy
double * LogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Policy Gradient ////////////////////
// The Policy Gradient Vector (PG) is the gradient of Surrogate Loss w.r.t. to policy parameters
// -PG is the input to the Conjugate Gradient (CG) function
// There is one-to-one correspondence between PG and policy parameters (W and B of neural network, LogStd)
double * PGW [NumLayers-1];
double * PGB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
PGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
PGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Policy Gradient corresponding to LogStd
double * PGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Episodic Reward ////////////////////
double * episodicRew = (double *) calloc(NumIter, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probablity mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experienments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
double * Action = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Reward = (double *) calloc(NumSamples, sizeof(double));
double * Return = (double *) calloc(NumSamples, sizeof(double));
double * Baseline = (double *) calloc(NumSamples, sizeof(double));
double * Advantage = (double *) calloc(NumSamples, sizeof(double));
double * Observ_FPGA = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean_FPGA = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Action_FPGA = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Reward_FPGA = (double *) calloc(NumSamples, sizeof(double));
//////////////////// Memory Allocation - Ordinary Forward and Backward Propagation ////////////////////
// Layer[i] : Memory of each layer's outputs, i.e. y_i
// GLayer[i]: Gradient of Loss Function w.r.t. the pre-activation values in Layer[i], i.e. d(Loss)/d(x_i)
double * Layer [NumLayers];
double * GLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
Layer[i] = (double *) calloc(LayerSize[i], sizeof(double));
GLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// GW[i]: Gradient of Loss Function w.r.t to Neural Network Weight W[i]
// GB[i]: Gradient of Loss Function w.r.t to Neural Network Bias B[i]
// There is one-to-one correspondence between: GW[i] and W[i], GB[i] and B[i], GStd[i] and Std[i]
double * GW [NumLayers-1];
double * GB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
GW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
GB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// GLogStd[i]: Gradient of Loss Function w.r.t LogStd[i]
double * GLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////
// RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
// RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
// RGLayer[I]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
double * RyLayer [NumLayers];
double * RxLayer [NumLayers];
double * RGLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// RGW[i]: R{} Gradient of KL w.r.t. to Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
// RGB[i]: R{} Gradient of KL w.r.t. to Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
// There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
double * RGW [NumLayers-1];
double * RGB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// RGLogStd[i]: R{} Gradient of KL w.r.t LogStd[i]
double * RGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Conjugate Gradient (CG) ////////////////////
// These names correspond to the names in the TRPO Python code
double * b = (double *) calloc(NumParams, sizeof(double));
double * p = (double *) calloc(NumParams, sizeof(double));
double * r = (double *) calloc(NumParams, sizeof(double));
double * x = (double *) calloc(NumParams, sizeof(double));
double * z = (double *) calloc(NumParams, sizeof(double));
//////////////////// Memory Allocation - Line Search ////////////////////
// These names correspond to the names in the TRPO Python code
// Note: In Line Search we also need a vector called x
// Here we just use the x declared for Conjugate Gradient for simlicity
// The x used in Line Search has nothing to do with the x used in CG
// They just have the same type and size
double * fullstep = (double *) calloc(NumParams, sizeof(double));
double * theta = (double *) calloc(NumParams, sizeof(double));
double * xnew = (double *) calloc(NumParams, sizeof(double));
//////////////////// Memory Allocation - Baseline ////////////////////
// WBase[i]: Weight Matrix from Layer[i] to Layer[i+1]
// BBase[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in WBase[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item BBase[k] is the bias of Neuron #k in Layer[i+1]
double * WBase [NumLayers-1];
double * BBase [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
WBase[i] = (double *) calloc(LayerSizeBase[i]*LayerSizeBase[i+1], sizeof(double));
BBase[i] = (double *) calloc(LayerSizeBase[i+1], sizeof(double));
}
// GW[i]: Gradient of Loss Function w.r.t to Neural Network Weight W[i]
// GB[i]: Gradient of Loss Function w.r.t to Neural Network Bias B[i]
// There is one-to-one correspondence between: GW[i] and W[i], GB[i] and B[i], GStd[i] and Std[i]
double * GWBase [NumLayers-1];
double * GBBase [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
GWBase[i] = (double *) calloc(LayerSizeBase[i]*LayerSizeBase[i+1], sizeof(double));
GBBase[i] = (double *) calloc(LayerSizeBase[i+1], sizeof(double));
}
// Layer[i] : Memory of each layer's outputs, i.e. y_i
// GLayer[i]: Gradient of Loss Function w.r.t. the pre-activation values in Layer[i], i.e. d(Loss)/d(x_i)s
double * LayerBase [NumLayers];
double * GLayerBase [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
LayerBase[i] = (double *) calloc(LayerSizeBase[i], sizeof(double));
GLayerBase[i] = (double *) calloc(LayerSizeBase[i], sizeof(double));
}
// Pamameters Vector for L-BFGS Optimisation
lbfgsfloatval_t * LBFGS_x = lbfgs_malloc(PaddedParamsBase);
// Objective Function Value
lbfgsfloatval_t LBFGS_fx;
//////////////////// Memory Allocation - MuJoCo Simulation ////////////////////
// Observation Vector and its Mean and Variance (for Filter)
double * ob = (double *) calloc(ObservSpaceDim, sizeof(double));
double * obMean = (double *) calloc(ObservSpaceDim, sizeof(double));
double * obVar = (double *) calloc(ObservSpaceDim, sizeof(double));
// Action Vector (for MuJoCo)
double * ac = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Parameters for Baseline ////////////////////
TRPOBaselineParam BaselineParam;
// Parameters
BaselineParam.NumLayers = NumLayers;
BaselineParam.ObservSpaceDim = ObservSpaceDim;
BaselineParam.NumEpBatch = NumEpBatch;
BaselineParam.EpLen = EpLen;
BaselineParam.NumSamples = NumSamples;
BaselineParam.NumParams = NumParamsBase;
BaselineParam.PaddedParams = PaddedParamsBase;
BaselineParam.LayerSizeBase = LayerSizeBase;
BaselineParam.AcFunc = AcFunc;
// For Forward Propagation and Back Propagation
BaselineParam.WBase = WBase;
BaselineParam.BBase = BBase;
BaselineParam.LayerBase = LayerBase;
BaselineParam.GWBase = GWBase;
BaselineParam.GBBase = GBBase;
BaselineParam.GLayerBase = GLayerBase;
// Training Data
BaselineParam.Observ = Observ;
BaselineParam.Target = Return; // The prediction target
BaselineParam.Predict = Baseline; // The prediction
//////////////////// Initialisation - Neural Network ////////////////////
// Note: Here we just initialise the Neural Network from a Datafile
// which is the initialisation given by the Python ML Libraries.
// We can also initialise the Neural Network ourselves
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &LogStd[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Initialisation - Baseline ////////////////////
// Note: Here we just initialise the Neural Network from a Datafile
// which is the initialisation given by the Python ML Libraries.
// We can also initialise the Neural Network ourselves
// Open Model File that contains Weights, Bias and std
FILE *BaselineFilePointer = fopen(BaselineFile, "r");
if (BaselineFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open BaselineFile [%s]. \n", BaselineFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSizeBase[i];
size_t nextLayerDim = LayerSizeBase[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(BaselineFilePointer, "%lf", &WBase[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(BaselineFilePointer, "%lf", &BBase[i][k]);
}
}
// Close Baseline Model File
fclose(BaselineFilePointer);
// L-BFGS Optimisation for Baseline Fitting
lbfgs_parameter_t LBFGS_Param;
lbfgs_parameter_init(&LBFGS_Param);
LBFGS_Param.max_iterations = 25;
//////////////////// Initialisation - FPGA Lightweight Simulator ////////////////////
// Load Maxfile and Engine
fprintf(stderr, "[INFO] Initialising FPGA...\n");
max_file_t* maxfile = TRPO_init();
max_engine_t* engine = max_load(maxfile, "*");
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
// Calculate BlockDim
size_t * BlockDim = (size_t *) calloc(NumLayers, sizeof(size_t));
for (int i=0; i<NumLayers; ++i) BlockDim[i] = PaddedLayerSize[i] / NumBlocks[i];
// WeightInit Vector
size_t WeightInitVecLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
WeightInitVecLength += BlockDim[i] * PaddedLayerSize[i+1];
}
size_t WeightInitVecWidth = NumBlocks[0];
// Size of WeightInit Vector - not padded into 384bytes as we use PCI-E
size_t WeightInitVecItems = WeightInitVecLength * WeightInitVecWidth;
double * WeightInit = (double *) calloc(WeightInitVecItems, sizeof(double));
fprintf(stderr,"[INFO] WeightInit Vector Size = %zu bytes.\n", WeightInitVecItems * sizeof(double));
// Length of BiasStd Vector
size_t BiasStdVecLength = PaddedLayerSize[NumLayers-1];
for (size_t i=1; i<NumLayers; ++i) {
BiasStdVecLength += PaddedLayerSize[i];
}
double * BiasStd = (double *) calloc(BiasStdVecLength, sizeof(double));
fprintf(stderr, "[INFO] BiasStd Vector Size = %zu bytes.\n", BiasStdVecLength * sizeof(double));
//////////////////// Initialisation - FPGA Conjugate Gradient ////////////////////
// The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
// There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
// As a result, the shape of the Input Vector is the same as that of the parameters in the model
// The only difference is that the Input Vector is stored in a flattened manner
// There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
double * VW [NumLayers-1];
double * VB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Input Vector corresponding to LogStd
double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
// Length of Weight and VWeight Initialisation Vector
int WeightInitVecLength_CG = 2 * WeightInitVecLength;
// Number of Cycles to Run on FPGA - Pipelined Forward and Back Propagation
// Remarks: Here we assume 4 layers
size_t MaxBlkDim0Dim2 = ( (BlockDim[0]>BlockDim[2]) ? BlockDim[0] : BlockDim[2] ) + EvenLayerCompExtraLatency;
size_t FwdCyclesPerSample = BlockDim[0] + (BlockDim[1]-1)*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t BwdCyclesPerSample = BlockDim[1]*MaxBlkDim0Dim2 + (BlockDim[2]+EvenLayerCompExtraLatency)*BlockDim[3];
size_t CyclesPerSample = (FwdCyclesPerSample>BwdCyclesPerSample) ? FwdCyclesPerSample : BwdCyclesPerSample;
size_t PropCyclesTotal = CyclesPerSample * (NumSamples + 1);
// Number of Cycles to Run on FPGA - Read Result Back
size_t FVPLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
FVPLength += PaddedLayerSize[i] * PaddedLayerSize[i+1];
FVPLength += PaddedLayerSize[i+1];
}
int PaddedFVPLength = ((int)ceil((double)FVPLength/2))*2;
// Number of Cycles to Run on FPGA for Each FVP Computation - Total
size_t NumTicks = WeightInitVecLength_CG + PropCyclesTotal + PaddedFVPLength + 20;
// Allocation Memory Space for FVP Result
double * FVPResult = (double *) calloc(PaddedFVPLength, sizeof(double));
// Length of Observation Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ObservVecLength = WeightInitVecLength_CG + NumSamples*BlockDim[0];
size_t ObservVecWidth = NumBlocks[0];
size_t ActualObservVecItems = ObservVecLength * ObservVecWidth;
size_t PaddedObservVecItems = (size_t) 48 * ceil( (double)ActualObservVecItems/48 );
fprintf(stderr, "[INFO] Observation Vector (%zu bytes) padded to %zu bytes\n", ActualObservVecItems*8, PaddedObservVecItems*8);
double * Observation = (double *) calloc(PaddedObservVecItems, sizeof(double));
// Length of DataP Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ActualDataPVecItems = WeightInitVecLength_CG * NumBlocks[0];
size_t PaddedDataPVecItems = (size_t) 48 * ceil( (double)ActualDataPVecItems/48 );
fprintf(stderr, "[INFO] Vector P (%zu bytes) padded to %zu bytes\n", ActualDataPVecItems*8, PaddedDataPVecItems*8);
double * DataP = (double *) calloc(PaddedDataPVecItems, sizeof(double));
// Number of Ticks for each CG iteration
fprintf(stderr, "[INFO] In each CG iteration FPGA will run for %zu cycles.\n", NumTicks);
// Length of BiasStd Vector
size_t BiasStdVecLength_CG = PaddedLayerSize[NumLayers-1];
for (size_t i=1; i<NumLayers; ++i) {
BiasStdVecLength_CG += 2*PaddedLayerSize[i];
}
double * BiasStd_CG = (double *) calloc(BiasStdVecLength_CG, sizeof(double));
//////////////////// Main Loop ////////////////////
// Calculate Time
struct timeval tv1, tv2, tv3, tv4, tv5, tv6, tv7, tv8;
double runtimeS = 0;
double fpgaTime = 0;
double simuTime = 0;
double trpoTime = 0;
double vfTime = 0;
// Run Training for NumIter Iterations
for (int iter=0; iter<NumIter; ++iter) {
// Tic
gettimeofday(&tv1, NULL);
///////// Lightweight Simulation on FPGA /////////
// Feed Weight into WeightInit
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
WeightInit[RowNum*WeightInitVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else WeightInit[RowNum*WeightInitVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Feed Bias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed LogStd into BiasStd
for (size_t k=0; k<PaddedLayerSize[NumLayers-1]; ++k) {
size_t LayerDimLimit = LayerSize[NumLayers-1];
if (k<LayerDimLimit) BiasStd[RowNum] = exp(LogStd[k]);
else BiasStd[RowNum] = 0;
RowNum++;
}
gettimeofday(&tv3, NULL);
// Run Lightweight Simulator on FPGA
TRPO_RunLightweight_actions_t simulation_action;
simulation_action.param_IterNumber = iter;
simulation_action.instream_WeightInit = WeightInit; // 4608 bytes
simulation_action.instream_BiasStdInit = BiasStd; // 320 bytes
simulation_action.outstream_Observ = Observ_FPGA;
simulation_action.outstream_Mean = Mean_FPGA;
simulation_action.outstream_Action = Action_FPGA;
simulation_action.outstream_Reward = Reward_FPGA;
TRPO_RunLightweight_run(engine, &simulation_action);
gettimeofday(&tv4, NULL);
fpgaTime += ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
simuTime += ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
// Re-Ordering FPGA Output Data
size_t WrRowAddr = 0;
for (size_t i=0; i<NumLightweightSimulators; ++i) {
size_t NumSamplesPerSimulator = NumSamples/NumLightweightSimulators;
for (size_t j=0; j<NumSamplesPerSimulator; ++j) {
size_t RdRowAddr = j * NumLightweightSimulators + i;
// Observ
for (size_t k=0; k<ObservSpaceDim; ++k) {
Observ[WrRowAddr*ObservSpaceDim+k] = Observ_FPGA[RdRowAddr*ObservSpaceDim+k];
}
// Mean
for (size_t k=0; k<ActionSpaceDim; ++k) {
Mean[WrRowAddr*ActionSpaceDim+k] = Mean_FPGA[RdRowAddr*ActionSpaceDim+k];
}
// Action
for (size_t k=0; k<ActionSpaceDim; ++k) {
Action[WrRowAddr*ActionSpaceDim+k] = Action_FPGA[RdRowAddr*ActionSpaceDim+k];
}
// Reward
Reward[WrRowAddr] = Reward_FPGA[RdRowAddr];
++WrRowAddr;
}
}
// Feed LogStd into Std
for (int i=0; i<ActionSpaceDim; ++i) Std[i] = exp(LogStd[i]);
///////// Calculate Reward Statistics /////////
double EpRewMean = 0;
for (int i=0; i<NumSamples; ++i) EpRewMean += Reward[i];
EpRewMean = EpRewMean / (double) NumEpBatch;
double EpRewStd = 0;
for (int i=0; i<NumEpBatch; ++i){
double thisEpReward = 0;
for (int j=0; j<EpLen; ++j) {
thisEpReward += Reward[i*EpLen+j];
}
EpRewStd += (thisEpReward-EpRewMean)*(thisEpReward-EpRewMean);
}
EpRewStd = sqrt(EpRewStd / (double) (NumEpBatch));
printf("[INFO] Iteration %d, Episode Rewards Mean = %f, Std = %f\n", iter, EpRewMean, EpRewStd);
episodicRew[iter] = EpRewMean;
///////// Calculate Advantage /////////
// Discount Reward to get Return
for (int ep=0; ep<NumEpBatch; ++ep) {
// Calculate Return
for (int currentStep=0; currentStep<EpLen; ++currentStep) {
// Reward in the current step
pos = ep*EpLen + currentStep;
double thisStepReturn = Reward[pos];
// Discounted future Reward
for (int futureStep=currentStep+1; futureStep<EpLen; ++futureStep) {
thisStepReturn += Reward[ep*EpLen+futureStep] * pow(gamma, futureStep-currentStep);
}
Return[pos] = thisStepReturn;
}
// Using Value Function to estimate return
// For each step in each episode
for (int currentStep=0; currentStep<EpLen; ++currentStep) {
// Observation Vector and Normalised Time Step
pos = ep*EpLen + currentStep;
for (int i=0; i<ObservSpaceDim; ++i) {
LayerBase[0][i] = Observ[pos*ObservSpaceDim+i];
}
LayerBase[0][ObservSpaceDim] = (double) currentStep / (double) EpLen;
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSizeBase[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
LayerBase[i+1][j] = BBase[i][j];
for (size_t k=0; k<LayerSizeBase[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
LayerBase[i+1][j] += LayerBase[i][k] * WBase[i][k*LayerSizeBase[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {LayerBase[i+1][j] = tanh(LayerBase[i+1][j]); break;}
// sigmoid Activation Function
case 's': {LayerBase[i+1][j] = 1.0/(1+exp(-LayerBase[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
// Write result to Baseline
Baseline[pos] = LayerBase[NumLayers-1][0];
}
// Using Reward to temporarily hold 'deltas'
for (int currentStep=0; currentStep<EpLen-1; ++currentStep) {
Reward[ep*EpLen+currentStep] += gamma * Baseline[ep*EpLen+currentStep+1] - Baseline[ep*EpLen+currentStep];
}
Reward[ep*EpLen+EpLen-1] += (-1) * Baseline[ep*EpLen+EpLen-1];
// Calculate the Advantage of this episode
for (int currentStep=0; currentStep<EpLen; ++currentStep) {
pos = ep*EpLen + currentStep;
double thisStepAdvantage = Reward[pos];
for (int futureStep=currentStep+1; futureStep<EpLen; ++futureStep) {
thisStepAdvantage += Reward[ep*EpLen+futureStep] * pow(gamma*lam, futureStep-currentStep);
}
Advantage[pos] = thisStepAdvantage;
}
}
// Standardise Advantage
double AdvantageMean = 0;
for (int i=0; i<NumSamples; ++i) AdvantageMean += Advantage[i];
AdvantageMean = AdvantageMean / (double) NumSamples;
double AdvantageStd = 0;
for (int i=0; i<NumSamples; ++i) AdvantageStd += (Advantage[i] - AdvantageMean)*(Advantage[i] - AdvantageMean);
AdvantageStd = sqrt(AdvantageStd / (double) (NumSamples));
for (int i=0; i<NumSamples; ++i) Advantage[i] = (Advantage[i] - AdvantageMean) / AdvantageStd;
///////// Baseline Update ///////// TODO: Can be executed concurrently with TRPO Update
gettimeofday(&tv5, NULL);
// Write Weight and Bias of the Baseline to LBFGS_x
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSizeBase[i];
size_t nextLayerDim = LayerSizeBase[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
LBFGS_x[pos] = WBase[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
LBFGS_x[pos] = BBase[i][k];
pos++;
}
}
// Run L-BFGS Algorithm to optimise the Baseline
lbfgs(BaselineParam.PaddedParams, LBFGS_x, &LBFGS_fx, evaluate, NULL, &BaselineParam, &LBFGS_Param);
// Update Baseline Weight and Bias from LBFGS_x
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSizeBase[i];
size_t nextLayerDim = LayerSizeBase[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
WBase[i][j*nextLayerDim+k] = LBFGS_x[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
BBase[i][k] = LBFGS_x[pos];
pos++;
}
}
gettimeofday(&tv6, NULL);
vfTime += ((tv6.tv_sec-tv5.tv_sec) * (double)1E6 + (tv6.tv_usec-tv5.tv_usec)) / (double)1E6;
//////////////////// TRPO Update ////////////////////
gettimeofday(&tv7, NULL);
///////// Computing Policy Gradient /////////
// reset Policy Gradient to 0
for (size_t i=0; i<NumParams; ++i) b[i] = 0;
// Processing all training samples
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Ordinary Forward Propagation /////////
// Assign Input Values
for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
Layer[i+1][j] = B[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
// 0.1x Activation Function
case 'o': {Layer[i+1][j] = 0.1*Layer[i+1][j]; break;}
// sigmoid Activation Function
case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
///////// Ordinary Backward Propagation /////////
// Gradient Initialisation
// Assign the derivative of Surrogate Loss w.r.t. Mean (output values from the final layer) and LogStd
for (size_t i=0; i<ActionSpaceDim; ++i) {
double temp = (Action[iter*ActionSpaceDim+i] - Mean[iter*ActionSpaceDim+i]) / exp(LogStd[i]);
GLayer[NumLayers-1][i] = Advantage[iter] * temp / exp(LogStd[i]);
GLogStd[i] = Advantage[iter] * (temp * temp - 1);
}
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
// Propagate from Layer[i] to Layer[i-1]
for (size_t j=0; j<LayerSize[i]; ++j) {
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {GLayer[i][j] = GLayer[i][j] * (1- Layer[i][j] * Layer[i][j]); break;}
// 0.1x Activation Function
case 'o': {GLayer[i][j] = 0.1 * GLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {GLayer[i][j] = GLayer[i][j] * Layer[i][j] * (1- Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i, AcFunc[i]);
return -1;
}
}
// The derivative w.r.t to Bias is the same as that w.r.t. the pre-activated value
GB[i-1][j] = GLayer[i][j];
}
// Calculate the derivative w.r.t. to Weight
for (size_t j=0; j<LayerSize[i-1]; ++j) {
for (size_t k=0; k<LayerSize[i]; ++k) {
// The Derivative w.r.t. to the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
GW[i-1][j*LayerSize[i]+k] = GLayer[i][k] * Layer[i-1][j];
}
}
// Calculate the derivative w.r.t. the output values from Layer[i]
for (size_t j=0; j<LayerSize[i-1]; ++j) {
GLayer[i-1][j] = 0;
for (size_t k=0; k<LayerSize[i]; ++k) {
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
GLayer[i-1][j] += GLayer[i][k] * W[i-1][j*LayerSize[i]+k];
}
}
}
// Accumulate the Policy Gradient to b
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
b[pos] += GW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
b[pos] += GB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
b[pos] += GLogStd[k];
pos++;
}
} // End of iteration over current sample
// Averaging Policy Gradient over the samples - Policy Gradient is held in b
// Note this corresponds to -g in the Python code: b = -g
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
b[i] = b[i] / (double)NumSamples;
}
//////////////////// Computing Search Direction ////////////////////
///////// Conjugate Gradient /////////
// This function implements Conjugate Gradient algorithm to solve linear equation Ax=b
// x: The Conjugate Gradient Result, i.e. solution x to Ax=b
// In TRPO context, x is the Step Direction of the line search (stepdir in the Python code)
// b: Vector b in the equation Ax=b
// Initialisation
double rdotr = 0;
for (size_t i=0; i<NumParams; ++i) {
x[i] = 0;
p[i] = b[i];
r[i] = b[i];
rdotr += r[i] * r[i];
}
// Initialisation - FVP
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = b[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = b[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = b[pos];
pos++;
}
// Feed Weight and VWeight into Observation
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Feed actual observation data into Observation
for (size_t iter_=0; iter_<NumSamples; ++iter_) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
if (RowNumPadded<RowNumLimit) Observation[RowNum*ObservVecWidth+X] = Observ[iter_*ObservSpaceDim+RowNumPadded];
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
// Feed Bias and VBias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd_CG[RowNum] = B[ID][k];
else BiasStd_CG[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd_CG[RowNum] = VB[ID][k];
else BiasStd_CG[RowNum] = 0;
RowNum++;
}
}
// Feed (1/Std)^2 into BiasStd
for (size_t k=0; k<PaddedLayerSize[NumLayers-1]; ++k) {
size_t LayerDimLimit = LayerSize[NumLayers-1];
if (k<LayerDimLimit) BiasStd_CG[RowNum] = 1.0 / Std[k] / Std[k];
else BiasStd_CG[RowNum] = 0;
RowNum++;
}
gettimeofday(&tv3, NULL);
// Init FPGA
fprintf(stderr, "[INFO] Loading Model and Simulation Data for CG...\n");
TRPO_WriteDRAM_actions_t init_action;
init_action.param_start_bytes = 0;
init_action.param_size_bytes = PaddedObservVecItems * sizeof(double);
init_action.instream_fromCPU = Observation;
TRPO_WriteDRAM_run(engine, &init_action);
gettimeofday(&tv4, NULL);
fpgaTime += ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
// Iterative Solver
for (size_t iter_CG=0; iter_CG<=MaxIter; ++iter_CG) {
// Calculate Frobenius Norm of x
double FrobNorm = 0;
#pragma omp parallel for reduction (+:FrobNorm)
for (size_t i=0; i<NumParams; ++i) {
FrobNorm += x[i] * x[i];
}
FrobNorm = sqrt(FrobNorm);
printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", iter_CG, rdotr, FrobNorm);
// Check Termination Condition
if (rdotr<ResidualTh || iter_CG==MaxIter) {
for (size_t i=0; i<NumParams; ++i) z[i] = x[i];
break;
}
//////////////////// FPGA - Load p ////////////////////
// Read p into VW, VB and VLogStd
// Init z to 0
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = p[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = p[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = p[pos];
z[pos] = 0;
pos++;
}
// Feed VW, VB and VLogStd into DataP
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Pad actual observation data into DataP
bool isPadding = true;
for (size_t iter_=0; iter_<NumSamples && isPadding; ++iter_) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim && isPadding; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
size_t posDataP = RowNum*ObservVecWidth+X;
if (posDataP<PaddedDataPVecItems) {
if (RowNumPadded<RowNumLimit) DataP[posDataP] = Observ[iter_*ObservSpaceDim+RowNumPadded];
else DataP[posDataP] = 0;
}
else {
isPadding = false;
break;
}
}
RowNum++;
}
}
// Feed Bias and VBias into BiasStd_CG
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd_CG[RowNum] = B[ID][k];
else BiasStd_CG[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd_CG[RowNum] = VB[ID][k];
else BiasStd_CG[RowNum] = 0;
RowNum++;
}
}
gettimeofday(&tv3, NULL);
// Feed DataP to BRAM
TRPO_WriteDRAM_actions_t write_action;
write_action.param_start_bytes = 0;
write_action.param_size_bytes = PaddedDataPVecItems * sizeof(double);
write_action.instream_fromCPU = DataP;
TRPO_WriteDRAM_run(engine, &write_action);
gettimeofday(&tv4, NULL);
fpgaTime += ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
//////////////////// FPGA - Calc z = FIM*p ////////////////////
// Init Advanced Static Interface
TRPO_Run_actions_t run_action;
run_action.param_NumSamples = NumSamples;
run_action.param_PaddedObservVecItems = PaddedObservVecItems;
run_action.instream_BiasStd = BiasStd_CG;
run_action.outstream_FVP = FVPResult;
// Run DFE and Measure Elapsed Time
gettimeofday(&tv3, NULL);
TRPO_Run_run(engine, &run_action);
gettimeofday(&tv4, NULL);
fpgaTime += ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
// Read FVP into z
pos = 0;
size_t FVPPos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerSizePadded = PaddedLayerSize[i];
size_t nextLayerSizePadded = PaddedLayerSize[i+1];
size_t curLayerSizeReal = LayerSize[i];
size_t nextLayerSizeReal = LayerSize[i+1];
for (size_t j=0; j<curLayerSizePadded; ++j) {
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if ( (j<curLayerSizeReal) && (k<nextLayerSizeReal) ) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if (k<nextLayerSizeReal) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] = 2 * NumSamples * VLogStd[k];
pos++;
}
// Averaging Fisher Vector Product over the samples and apply CG Damping
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples;
z[i] += CG_Damping * p[i];
}
//////////////////// FPGA - End ////////////////////
// Update x and r
double pdotz = 0;
#pragma omp parallel for reduction (+:pdotz)
for (size_t i=0; i<NumParams; ++i) {
pdotz += p[i] * z[i];
}
double v = rdotr / pdotz;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
x[i] += v * p[i];
r[i] -= v * z[i];
}
// Update p
double newrdotr = 0;
#pragma omp parallel for reduction (+:newrdotr)
for (size_t i=0; i<NumParams; ++i) {
newrdotr += r[i] * r[i];
}
double mu = newrdotr / rdotr;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
p[i] = r[i] + mu * p[i];
}
// Update rdotr
rdotr = newrdotr;
}
// Calculate another Fisher Vector Product - code reuse opportunity
///////// Fisher Vector Product Computation z = FVP(x) /////////
// Init PGW, PGB, PGLogStd from x
// Init z to 0
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
PGW[i][j*nextLayerDim+k] = x[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
PGB[i][k] = x[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
PGLogStd[k] = x[pos];
z[pos] = 0;
pos++;
}
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Combined Forward Propagation /////////
// Initialise the Input Layer
for (size_t i=0; i<ObservSpaceDim; ++i) {
Layer[0][i] = Observ[iter*ObservSpaceDim+i];
RxLayer[0][i] = 0;
RyLayer[0][i] = 0;
}
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
size_t CurrLayerSize = LayerSize[i];
size_t NextLayerSize = LayerSize[i+1];
size_t j, k;
// Propagate from Layer[i] to Layer[i+1]
#pragma omp parallel for private(j,k) shared(Layer, RxLayer, RyLayer, W, PGW, B, PGB, AcFunc) schedule(static)
for (j=0; j<NextLayerSize; ++j) {
// Initialise x_j and R{x_j} in next layer
// Here we just use y_j's memory space to store x_j temoporarily
Layer[i+1][j] = B[i][j];
RxLayer[i+1][j] = PGB[i][j];
for (k=0; k<CurrLayerSize; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += Layer[i][k] * PGW[i][k*NextLayerSize+j];
}
// Calculate y_j and R{y_j} in next layer. Note that R{y_j} depends on y_j
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {
RyLayer[i+1][j] = RxLayer[i+1][j];
break;
}
// tanh() Activation Function
case 't': {
Layer[i+1][j] = tanh(Layer[i+1][j]);
RyLayer[i+1][j] = RxLayer[i+1][j] * (1 - Layer[i+1][j] * Layer[i+1][j]);
break;
}
// sigmoid Activation Function
case 's': {
Layer[i+1][j] = 1.0 / ( 1 + exp(-Layer[i+1][j]) );
RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1 - Layer[i+1][j]);
break;
}
// Default: Activation Function not supported
default: {
printf("[ERROR] AC Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
}
}
}
}
///////// Pearlmutter Backward Propagation /////////
// Gradient Initialisation
// Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
for (size_t i=0; i<ActionSpaceDim; ++i) {
RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i] / Std[i] / Std[i];
}
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
size_t CurrLayerSize = LayerSize[i];
size_t PrevLayerSize = LayerSize[i-1];
size_t j, k;
// Propagate from Layer[i] to Layer[i-1]
#pragma omp parallel for private(j) shared(Layer, RGLayer, RGB) schedule(static)
for (j=0; j<CurrLayerSize; ++j) {
// Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] AC Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
}
}
// The R{} derivative w.r.t to Bias is the same as that w.r.t. the pre-activated value
RGB[i-1][j] = RGLayer[i][j];
}
// Calculate the R{} derivative w.r.t. to Weight and the output values from Layer[i]
#pragma omp parallel for private(j,k) shared(Layer, RGLayer, W, RGW) schedule(static)
for (j=0; j<PrevLayerSize; ++j) {
double temp = 0;
for (k=0; k<CurrLayerSize; ++k) {
// The R{} Derivative w.r.t. to the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
RGW[i-1][j*CurrLayerSize+k] = Layer[i-1][j] * RGLayer[i][k];
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
temp += W[i-1][j*CurrLayerSize+k] * RGLayer[i][k];
}
RGLayer[i-1][j] = temp;
}
}
// Accumulate the Fisher-Vector Product to z
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] += 2 * PGLogStd[k];
pos++;
}
} // End of iteration over current sample
// Averaging Fisher Vector Product over the samples and apply CG Damping
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples + CG_Damping * x[i];
}
// Now z holds the Fisher Vector Product, x holds stepdir
double shs = 0;
#pragma omp parallel for reduction (+:shs)
for (size_t i=0; i<NumParams; ++i) {
shs += z[i] * x[i];
}
shs = shs * 0.5;
printf("shs: %.14f\n", shs);
// Lagrange Multiplier (lm in Python code)
double lm = sqrt(shs / MaxKL);
// Compute the 2-norm of the Policy Gradient
double gnorm = 0;
for (size_t i=0; i<NumParams; ++i) {
gnorm += b[i] * b[i];
}
gnorm = sqrt(gnorm);
printf("lagrange multiplier: %.14f, gnorm: %.14f\n", lm, gnorm);
// Full Step
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
fullstep[i] = x[i] / lm;
}
// Inner product of Negative Policy Gradient -g and Step Direction
double neggdotstepdir = 0;
#pragma omp parallel for reduction (+:neggdotstepdir)
for (size_t i=0; i<NumParams; ++i) {
neggdotstepdir += b[i] * x[i];
}
//////////////////// Line Search ////////////////////
// Init theta to x
// If Line Search is unsuccessful, theta remains as x
for (size_t i=0; i<NumParams; ++i) theta[i] = x[i];
// Expected Improve Rate Line Search = slope dy/dx at initial point
double expected_improve_rate = neggdotstepdir / lm;
// Temporarily Save the Model Parameters in x
// The x refers to the x in linesearch function in Python code
// Note: Although the name is the same, the x here has nothing to do with the x in Conjugate Gradient
// Copy the Model Parameters to x
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
x[pos] = W[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
x[pos] = B[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
x[pos] = LogStd[k];
pos++;
}
// Surrogate Loss of the current Model parameters = -Avg(Advantage)
double fval = 0;
#pragma omp parallel for reduction (+:fval)
for (size_t i=0; i<NumSamples; ++i) {
fval += Advantage[i];
}
fval = -fval / (double) NumSamples;
printf("fval before %.14e\n", fval);
// Backtracking Line Search
for (size_t i=0; i<MaxBackTracks; ++i) {
// Step Fraction
double stepfrac = pow(0.5, (double)i);
// x New
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
xnew[i] = x[i] + stepfrac * fullstep[i];
}
///////// Compute Surrogate Loss /////////
// Init W, B, LogStd from xnew
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
W[i][j*nextLayerDim+k] = xnew[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
B[i][k] = xnew[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
LogStd[k] = xnew[pos];
pos++;
}
// Init Surrogate Loss to 0
double surr = 0;
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Ordinary Forward Propagation /////////
// Assign Input Values
for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
Layer[i+1][j] = B[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
// sigmoid Activation Function
case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
// Surrogate Loss Calculation
// LoglikelihoodDifference = logp_i - oldlogp_i
// Here, logp_i is derived from xnew, oldlogp_i is derived from x (Mean in the simulation data)
double LoglikelihoodDifference = 0;
for (size_t i=0; i<ActionSpaceDim; ++i) {
double temp_x = (Action[iter*ActionSpaceDim+i] - Mean[iter*ActionSpaceDim+i]) / Std[i];
double temp_xnew = (Action[iter*ActionSpaceDim+i] - Layer[NumLayers-1][i]) / exp(LogStd[i]);
LoglikelihoodDifference += temp_x*temp_x - temp_xnew*temp_xnew + log(Std[i]) - LogStd[i];
}
LoglikelihoodDifference = LoglikelihoodDifference * 0.5;
// Accumulate Surrogate Loss
surr += exp(LoglikelihoodDifference) * Advantage[iter];
}
// Average Surrogate Loss over the samples to get newfval
double newfval = -surr / (double) NumSamples;
// Improvement in terms of Surrogate Loss
double actual_improve = fval - newfval;
// Expected Improvement
double expected_improve = expected_improve_rate * stepfrac;
// Improvement Ratio
double ratio = actual_improve / expected_improve;
printf("a/e/r %.14f / %.14f / %.14f\n", actual_improve, expected_improve, ratio);
// Check breaking condition - has Line Search succeeded?
if ( (ratio > AcceptRatio) && (actual_improve > 0) ) {
// If Line Search is successful, update parameters and quit
for (size_t i=0; i<NumParams; ++i) theta[i] = xnew[i];
break;
}
} // End of Line Search
gettimeofday(&tv8, NULL);
trpoTime += ((tv8.tv_sec-tv7.tv_sec) * (double)1E6 + (tv8.tv_usec-tv7.tv_usec)) / (double)1E6;
// Update Model from theta
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
W[i][j*nextLayerDim+k] = theta[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
B[i][k] = theta[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
LogStd[k] = theta[pos];
pos++;
}
// Toc
gettimeofday(&tv2, NULL);
runtimeS += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
//////////////////// Save Training Result ////////////////////
// Save training result EVERY 100 Iteration as well as in the Last Iteration
if(iter%100==0 || iter==(NumIter-1)) {
// Generate Result File Name
char ResultFileName[30];
strcpy(ResultFileName, ResultFile);
char suffix[10];
sprintf(suffix, "%03d.txt", iter);
strcat(ResultFileName, suffix);
// Open Result File to write Weights, Bias and LogStd
FILE *ResultFilePointer = fopen(ResultFileName, "w");
if (ResultFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Result File [%s]. \n", ResultFileName);
return -1;
}
// Write Weights and Bias to file
for (size_t i=0; i<NumLayers-1; ++i) {
// Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fprintf(ResultFilePointer, "%.14f\n", W[i][j*nextLayerDim+k]);
}
}
// Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fprintf(ResultFilePointer, "%.14f\n", B[i][k]);
}
}
// LogStd
for (size_t k=0; k<ActionSpaceDim; ++k) {
fprintf(ResultFilePointer, "%.14f\n", LogStd[k]);
}
// Close Result File
fclose(ResultFilePointer);
}
} // Training Finished
fprintf(stderr, "[INFO] Total Time for training is %fs: FPGA Time = %fs, simuTime = %fs, trpoTime = %fs, VF Update = %fs.\n", runtimeS, fpgaTime, simuTime, trpoTime, vfTime);
//////////////////// Report Episodic Reward ////////////////////
FILE *RewardFilePointer = fopen("EpRewMean.txt", "w");
if (RewardFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Reward File \"EpRewMean.txt\"\n");
return -1;
}
for (size_t k=0; k<NumIter; ++k) {
fprintf(RewardFilePointer, "%.14f\n", episodicRew[k]);
}
fclose(RewardFilePointer);
//////////////////// Clean Up ////////////////////
// Clean-Up L-BFGS
lbfgs_free(LBFGS_x);
// Model: Weight & Bias, Gradient of Weight & Bias, Policy Gradient of Weight & Bias, R{} Gradient of Weight & Bias
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(B[i]);
free(GW[i]); free(GB[i]);
free(VW[i]); free(VB[i]);
free(PGW[i]); free(PGB[i]);
free(RGW[i]); free(RGB[i]);
}
// Model: LogStd, Gradient of LogStd, Policy Gradient of LogStd, R{} Gradient of LogStd
free(LogStd); free(GLogStd); free(PGLogStd); free(RGLogStd); free(VLogStd);
// Baseline: Weight & Bias, Gradient of Weight & Bias
for (size_t i=0; i<NumLayers-1; ++i) {
free(WBase[i]); free(BBase[i]);
free(GWBase[i]); free(GBBase[i]);
}
// Forward and Backward Propagation
for (size_t i=0; i<NumLayers; ++i) {
// Model: Ordinary Forward and Backward Propagation
free(Layer[i]); free(GLayer[i]);
// Model: Pearlmutter Forward and Backward Propagation
free(RxLayer[i]); free(RyLayer[i]); free(RGLayer[i]);
// Baseline: Ordinary Forward and Backward Propagation
free(LayerBase[i]); free(GLayerBase[i]);
}
// Conjugate Gradient
free(b); free(p); free(r); free(x); free(z);
// Line Search
free(fullstep); free(xnew); free(theta);
// MuJoCo: Observation, Action and Observation Filtering
free(ob); free(ac); free(obMean); free(obVar);
// Simulation Data and Advantage Calculation
free(Observ); free(Mean); free(Std); free(Action); free(Reward);
free(Return); free(Baseline); free(Advantage); free(episodicRew);
// FPGA
max_unload(engine); TRPO_free();
free(BlockDim); free(WeightInit); free(BiasStd);
free(Observ_FPGA); free(Mean_FPGA); free(Action_FPGA); free(Reward_FPGA);
// Free Memories Allocated for DFE
free(Observation); free(BiasStd_CG); free(FVPResult); free(DataP);
return runtimeS;
}
|
GB_unaryop__abs_fp32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_uint32
// op(A') function: GB_tran__abs_fp32_uint32
// C type: float
// A type: uint32_t
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp32_uint32   // Cx = fabsf ((float) Ax), element-wise
(
    float *restrict Cx,             // output array, length anz
    const uint32_t *restrict Ax,    // input array, length anz
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time;
    // GrB_NO_VALUE tells the caller to fall back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = fabsf ((float) Ax [p]), via the GB_* macros defined above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_fp32_uint32   // C = fabsf ((float) A'): transpose + typecast + op
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // workspace consumed by the included template
    GBI_single_iterator Iter,       // iterator over A, consumed by the template
    const int64_t *restrict A_slice,    // per-thread partition of A's entries
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    // disabled at compile time; caller falls back to the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands here using the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_int8)
// op(A') function: GB (_unop_tran__identity_uint32_int8)
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint32_int8)     // Cx = (uint32_t) Ax
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries (or bitmap slots)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // disabled at compile time; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot p holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            // note: negative int8 values wrap modulo 2^32 per C conversion
            // rules, e.g. (int8_t) -1 becomes 4294967295
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots with no entry
            int8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint32_int8)  // C = (uint32_t) A'
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspace consumed by the template
    const int64_t *restrict A_slice,    // per-thread partition of A's entries
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads to use
)
{
    #if GB_DISABLE
    // disabled at compile time; caller falls back to the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands here using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
opencl_office2010_fmt_plug.c | /* MS Office 2010 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* OpenCL support by magnum.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum and it is hereby released to the general public
* under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_office2010;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_office2010);
#else
#include "sha.h"
#include <openssl/aes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "common-opencl.h"
#include "config.h"
#define PLAINTEXT_LENGTH 51
#define UNICODE_LENGTH 104 /* In octets, including 0x80 */
#define FORMAT_LABEL "office2010-opencl"
#define FORMAT_NAME "MS Office 2010"
#define OCL_ALGORITHM_NAME "SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " AES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (100,000 iterations)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_LENGTH 16
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
/* Self-test vectors. Hash layout (all hex fields):
 * $office$*<version>*<spinCount>*<keySize>*<saltSize>*<salt>*<encryptedVerifier>*<encryptedVerifierHash> */
static struct fmt_tests tests[] = {
	/* 2010-Default_myhovercraftisfullofeels_.docx */
	{"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.dotx */
	{"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsb */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsx */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"},
	{NULL}
};
/* Parsed form of one "$office$*2010*..." hash; filled in by get_salt() */
static struct custom_salt {
	char unsigned osalt[SALT_LENGTH];           /* raw salt bytes from the hash */
	char unsigned encryptedVerifier[16];        /* verifier ciphertext field */
	char unsigned encryptedVerifierHash[32];    /* verifier-hash ciphertext field */
	int version;        /* format version; valid() only admits 2010 */
	int spinCount;      /* key-stretching iteration count (100,000 in test vectors) */
	int keySize;        /* key size field from the hash (valid() admits 128/256) */
	int saltSize;       /* actual salt length in octets (valid() requires 16) */
} *cur_salt;
/* Host-side state shared across init/set_key/set_salt/crypt_all */
static int *cracked, any_cracked;   /* per-candidate result flags + summary */
static unsigned int v_width = 1; /* Vector width of kernel */
static char *saved_key; /* Password encoded in UCS-2 */
static int *saved_len; /* UCS-2 password length, in octets */
static char *saved_salt;    /* host copy of the current salt */
static unsigned char *key; /* Output key from kernel */
static int new_keys, spincount; /* new_keys: keys changed since last transfer */
/* Device buffers and their pinned host-side mirrors */
static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key, cl_spincount;
static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key;
static cl_kernel GenerateSHA1pwhash, Generate2010key;
#define HASH_LOOPS 500 /* Lower figure gives less X hogging */
#define ITERATIONS 100000
#define STEP 0
#define SEED 128
#define OCL_CONFIG "office2010"
/* Timing labels for the autotune events (see opencl-autotune.h) */
static const char * warn[] = {
	"xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: "
};
/* Event 3 (the HASH_LOOPS kernel) is the split/repeated event for autotune */
static int split_events[] = { 3, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Smallest work-group size limit acceptable to all three kernels in the
 * crypt chain; used by the shared autotune code to bound LWS. */
static size_t get_task_max_work_group_size()
{
	size_t limit = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash);
	size_t loop_limit = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	size_t final_limit = autotune_get_task_max_work_group_size(FALSE, 0, Generate2010key);

	if (loop_limit < limit)
		limit = loop_limit;
	if (final_limit < limit)
		limit = final_limit;
	return limit;
}
/* 0 = this format imposes no upper bound on global work size; the shared
 * autotune code picks GWS on its own (see opencl-autotune.h). */
static size_t get_task_max_size()
{
	return 0;
}
/* Fallback local work-group size when autotune has nothing better:
 * 64 on GPUs; on CPU devices, 8 for Intel OpenCL, 1 otherwise. */
static size_t get_default_workgroup()
{
	if (!cpu(device_info[gpu_id]))
		return 64;
	if (get_platform_vendor_id(platform_id) == DEV_INTEL)
		return 8;
	return 1;
}
/* Allocate all device buffers plus pinned (page-locked) host mirrors for a
 * batch of 'gws' work items (scaled by the kernel vector width), bind the
 * static kernel arguments, and allocate the host-side result array.
 * Invoked by the shared autotune code via opencl_init_auto_setup().
 * Fix: loop index was 'int', silently compared against the size_t 'gws'
 * (signed/unsigned mismatch); now size_t. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	size_t i;
	/* benchmark password length in octets (UCS-2 = 2 bytes per char) */
	int bench_len = strlen(tests[0].plaintext) * 2;
	gws *= v_width;
	/* Candidate passwords (UCS-2), pinned for fast host<->device transfer */
	pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key");
	memset(saved_key, 0, UNICODE_LENGTH * gws);
	/* Per-candidate password lengths (octets) */
	pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len");
	/* Pre-fill with the benchmark length so autotune runs are realistic */
	for (i = 0; i < gws; i++)
		saved_len[i] = bench_len;
	/* Current salt */
	pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt");
	memset(saved_salt, 0, SALT_LENGTH);
	/* Intermediate SHA-1 state carried between HashLoop invocations:
	 * 5 state words + counter per work item */
	cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device state buffer");
	/* Final 32-byte derived keys, read back for host-side AES verification */
	pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 32 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 32 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 32 * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory verifier keys");
	memset(key, 0, 32 * gws);
	/* Single-int spin-count buffer, backed by the host variable */
	cl_spincount = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(cl_int), &spincount, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping spincount");
	/* Static kernel arguments (buffers never change identity after this) */
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 2, sizeof(cl_mem), (void*)&cl_spincount), "Error setting argument 2");
	cracked = mem_alloc(sizeof(*cracked) * gws);
}
/* Release everything create_clobj() allocated. Order matters: unmap the
 * pinned buffers first, wait for the unmaps to complete (clFinish), and
 * only then release the underlying cl_mem objects. */
static void release_clobj(void)
{
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
	HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");
	HANDLE_CLERROR(clReleaseMemObject(cl_spincount), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");
	MEM_FREE(cracked);
}
/* Format teardown: free buffers, then release kernels and the program. */
static void done(void)
{
	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(Generate2010key), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
/* Wipe all candidate passwords and their lengths for the whole batch. */
static void clear_keys(void)
{
	const size_t count = global_work_size * v_width;

	memset(saved_key, 0, count * UNICODE_LENGTH);
	memset(saved_len, 0, count * sizeof(*saved_len));
}
/* Store candidate password 'key' at slot 'index', encoded as UTF-16LE.
 * Note: the parameter shadows the file-scope 'key' output buffer. */
static void set_key(char *key, int index)
{
	UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];
	/* convert key to UTF-16LE */
	saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	/* negative return = truncated conversion; use the length actually stored */
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(utfkey);
	/* Prepare for GPU: append the 0x80 padding word expected by the
	 * SHA-1 kernel (UNICODE_LENGTH budget includes this extra word) */
	utfkey[saved_len[index]] = 0x80;
	/* length is kept in octets (2 per UTF-16 code unit) */
	saved_len[index] <<= 1;
	/* flag crypt_all to re-transfer the key buffer to the device */
	new_keys = 1;
}
/* Parse a "$office$*version*spinCount*keySize*saltSize*salt*verifier*
 * verifierHash" string into a custom_salt. The ciphertext has already
 * passed valid(), so the strtok sequence below cannot return NULL.
 * Storage comes from mem_calloc_tiny (pool allocator; never freed here). */
static void *get_salt(char *ciphertext)
{
	int i, length;
	char *ctcopy = strdup(ciphertext);	/* strtok modifies its input */
	char *keeptr = ctcopy, *p;
	cur_salt = mem_calloc_tiny(sizeof(struct custom_salt),
			MEM_ALIGN_WORD);
	ctcopy += 9;	/* skip over "$office$*" */
	p = strtok(ctcopy, "*");
	cur_salt->version = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->spinCount = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->keySize = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->saltSize = atoi(p);
	if (cur_salt->saltSize > SALT_LENGTH) {
		fprintf(stderr, "** error: salt longer than supported:\n%s\n", ciphertext);
		cur_salt->saltSize = SALT_LENGTH; /* will not work, but protects us from segfault */
	}
	/* remaining fields are hex strings; decode two chars per byte */
	p = strtok(NULL, "*");
	for (i = 0; i < cur_salt->saltSize; i++)
		cur_salt->osalt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cur_salt->encryptedVerifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	length = strlen(p) / 2;	/* valid() caps this field at 64 hex chars */
	for (i = 0; i < length; i++)
		cur_salt->encryptedVerifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cur_salt;
}
/* Make 'salt' the active salt and push its bytes plus the spin count to
 * the device. Writes are non-blocking (CL_FALSE); on the default in-order
 * queue they complete before any subsequently enqueued kernel runs.
 * Fix: the spincount transfer size was a magic '4'; use sizeof(spincount)
 * to match the buffer created with sizeof(cl_int) in create_clobj(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
	spincount = cur_salt->spinCount;
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_spincount, CL_FALSE, 0, sizeof(spincount), &spincount, 0, NULL, NULL), "failed in clEnqueueWriteBuffer spincount");
}
static int crypt_all(int *pcount, struct db_salt *salt);
static int crypt_all_benchmark(int *pcount, struct db_salt *salt);
/* One-time format setup: probe the device's preferred vector width, build
 * the OpenCL program, create the three kernels, then let the shared
 * autotune code pick LWS/GWS (temporarily swapping in a benchmark-only
 * crypt_all). Finally derive the keys-per-crypt limits from the tuned
 * work sizes. */
static void init(struct fmt_main *self)
{
	char build_opts[64];
	static char valgo[32] = "";
	if ((v_width = opencl_get_vector_width(gpu_id,
	                                       sizeof(cl_int))) > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo),
		         OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, v_width);
		self->params.algorithm_name = valgo;
	}
	/* Pass the loop/count/width constants into the kernel compilation */
	snprintf(build_opts, sizeof(build_opts),
	         "-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u",
	         HASH_LOOPS,
	         UNICODE_LENGTH,
	         v_width);
	opencl_init("$JOHN/kernels/office2010_kernel.cl", gpu_id,
	            build_opts);
	// create kernel to execute
	GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	Generate2010key = clCreateKernel(program[gpu_id], "Generate2010key", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, HASH_LOOPS, split_events,
	                       warn, 3, self, create_clobj, release_clobj,
	                       UNICODE_LENGTH, 0);
	// Auto tune execution from shared/included code.
	/* autotune uses a lighter crypt_all (no host-side AES verification) */
	self->methods.crypt_all = crypt_all_benchmark;
	autotune_run(self, ITERATIONS + 4, 0,
	             (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	self->methods.crypt_all = crypt_all;
	self->params.min_keys_per_crypt = local_work_size * v_width;
	self->params.max_keys_per_crypt = global_work_size * v_width;
	/* With UTF-8 input, up to 3 octets may map to one UTF-16 code unit */
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/* Return nonzero iff the NUL-terminated string q consists entirely of
 * hex digits, as classified by the atoi16 lookup table (0x7F marks a
 * non-hex character). An empty string is accepted. */
static int ishex(char *q)
{
	const char *p;

	for (p = q; atoi16[ARCH_INDEX(*p)] != 0x7F; p++)
		;
	return *p == 0;
}
/* Validate one ciphertext line of the form
 *   $office$*2010*<spincount>*<keybits>*<saltlen>*<salt>*<verifier>*<verifierhash>
 * Returns 1 if the hash is well formed, 0 otherwise.
 * Works on a strdup'd copy because strtok() mutates its input. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, "$office$*2010*", 14))
		return 0;
	if (!(ctcopy = strdup(ciphertext))) {
		fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL);
		return 0;
	}
	keeptr = ctcopy;
	/* Skip the 14-character "$office$*2010*" tag. (This used to be
	 * "+= 15", which also swallowed the first digit of the spin count
	 * field -- harmless only because that token is never validated.) */
	ctcopy += 14;
	if (!(ptr = strtok(ctcopy, "*"))) /* spin count (iterations) */
		goto error;
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strncmp(ptr, "128", 3) && strncmp(ptr, "256", 3)) /* key size */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt size */
		goto error;
	res = atoi(ptr);
	if (res != 16) /* can we handle other values? */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) != res * 2)
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier hash */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (strlen(ptr) > 64)
		goto error;
	if ((ptr = strtok(NULL, "*"))) /* reject trailing garbage */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/* AES-CBC decrypt `length` bytes of encryptedVerifier into
 * decryptedVerifier using verifierInputKey, with an IV built from the
 * first 16 bytes of the document salt (zero-padded to 32 bytes).
 * Key width (128/256 bits) follows cur_salt->keySize.
 *
 * Fix: the output parameter was declared `const unsigned char *` and
 * then const-cast away at the call site of AES_cbc_encrypt; it is a
 * genuine output buffer, so the const (and the cast) are removed. */
static void DecryptUsingSymmetricKeyAlgorithm(unsigned char *verifierInputKey, unsigned char *encryptedVerifier, unsigned char *decryptedVerifier, int length)
{
	unsigned char iv[32];
	AES_KEY akey;
	int bits;

	memcpy(iv, cur_salt->osalt, 16);
	memset(&iv[16], 0, 16);
	memset(&akey, 0, sizeof(AES_KEY));

	bits = (cur_salt->keySize == 128) ? 128 : 256;
	if (AES_set_decrypt_key(verifierInputKey, bits, &akey) < 0) {
		/* Best-effort: report but proceed, matching prior behavior. */
		fprintf(stderr, "AES_set_decrypt_key failed!\n");
	}
	AES_cbc_encrypt(encryptedVerifier, decryptedVerifier, length, &akey, iv, AES_DECRYPT);
}
/* Main cracking routine: run the GPU pipeline over `count` candidates,
 * read back one 32-byte derived key per candidate, then verify each
 * candidate on the CPU by decrypting the verifier pair and comparing
 * SHA-1 digests. Returns the number of candidates processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;
	size_t gws, scalar_gws;

	/* Round the global work size up to a multiple of the (vectorized)
	 * local work size. */
	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	/* Reset per-batch crack results from the previous call. */
	if (any_cracked) {
		memset(cracked, 0, count * sizeof(*cracked));
		any_cracked = 0;
	}

	/* Upload key material only when set_key() changed it. */
	if (new_keys) {
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_key");
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_len");
		new_keys = 0;
	}

	/* Stage 1: initial SHA-1 of the UTF-16 password. */
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, firstEvent), "failed in clEnqueueNDRangeKernel");

	/* Stage 2: spincount iterations, split into HASH_LOOPS-sized kernel
	 * launches so the GPU watchdog is not tripped and events can be
	 * processed between launches. */
	for (index = 0; index < spincount / HASH_LOOPS; index++) {
		HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	/* Stage 3: derive the two 2010 verifier keys. */
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, &local_work_size, 0, NULL, lastEvent), "failed in clEnqueueNDRangeKernel");

	// read back verifier keys (blocking: CL_TRUE)
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 32 * scalar_gws, key, 0, NULL, NULL), "failed in reading key back");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		SHA_CTX ctx;
		unsigned char hash[20];
		unsigned char decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];

		/* key[] holds two 16-byte AES keys per candidate: bytes 0-15
		 * decrypt the verifier input, bytes 16-31 the verifier hash. */
		DecryptUsingSymmetricKeyAlgorithm(&key[32*index], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
		DecryptUsingSymmetricKeyAlgorithm(&key[32*index+16], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);
		SHA1_Final(hash, &ctx);
		/* Match: SHA-1 of the decrypted input equals the first 20 bytes
		 * of the decrypted verifier hash. */
		if (!memcmp(hash, decryptedVerifierHashBytes, 20))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Auto-tune variant of crypt_all: runs the full transfer + kernel
 * pipeline with profiling events attached, but skips the CPU-side
 * verification. Only used while autotune_run() is measuring. */
static int crypt_all_benchmark(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	size_t gws, scalar_gws;

	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");
	/* The loop kernel is enqueued twice -- presumably to match the
	 * split_events configuration passed to opencl_init_auto_setup();
	 * TODO confirm against the shared autotune code. */
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");

	// read back aes key
	/* NOTE(review): reads 16 bytes/candidate while crypt_all reads 32 --
	 * results are discarded here, but confirm this is intentional for
	 * the timed transfer. */
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");

	return count;
}
/* Fast batch check: nonzero iff crypt_all() cracked at least one
 * candidate in the last batch. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate check: result was fully decided in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification needed: cmp_one() is already exact. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Return candidate `index` converted from the stored UTF-16 buffer
 * back to the configured target encoding. saved_len[index] is in
 * bytes, hence the >> 1 to index UTF-16 code units for termination. */
static char *get_key(int index)
{
	UTF16 buf[PLAINTEXT_LENGTH + 1];

	memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]);
	buf[saved_len[index] >> 1] = 0;
	return (char*)utf16_to_enc(buf);
}
#if FMT_MAIN_VERSION > 11
/* Report the salt's spin count as this format's tunable cost. */
static unsigned int iteration_count(void *salt)
{
	/*
	 * Is spinCount always 100000, or just in our format tests?
	 */
	return (unsigned int)((struct custom_salt *)salt)->spinCount;
}
#endif
/* John the Ripper format descriptor: static parameters first, then the
 * method table wiring this file's functions into the cracker core. */
struct fmt_main fmt_opencl_office2010 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ /* tunable cost names, parallel to the cost functions below */
			"iteration count",
		},
#endif
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ /* tunable cost extraction */
			iteration_count,
		},
#endif
		fmt_default_source,
		{ /* binary_hash: unused, comparison is done via cracked[] */
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
cell_division_gpu.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#define SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#include <array>
#include "biodynamo.h"
#include "core/param/command_line_options.h"
#include "core/util/math.h"
#include "core/util/timing.h"
namespace bdm {
// ----------------------------------------------------------------------------
// Starting with 8 cells, we let each cell grow in volume up until a point
// a cell must divide. This tests whether the GPU accelerated mechanical
// interactions properly handle the creation of new cells.
// -----------------------------------------------------------------------------
// Compare two 3-vectors component-wise. For any component whose absolute
// difference exceeds kTol, print a diagnostic and set *wrong to true.
// Never clears *wrong, so results accumulate across calls.
// (Fix: the difference was computed twice per component; it is now
// hoisted into a local and reused in the message.)
inline void ExpectArrayNear(const Double3& actual, const Double3& expected,
                            bool* wrong) {
  constexpr double kTol = 1e-9;
  for (size_t i = 0; i < actual.size(); i++) {
    const double diff = std::fabs(expected[i] - actual[i]);
    if (diff > kTol) {
      *wrong = true;
      std::cout << "Wrong result! Expected " << expected[i]
                << ", but instead got " << actual[i]
                << ", which is a difference of " << diff
                << ", which is larger than 1e-9" << std::endl;
    }
  }
}
// Build a cells_per_dim^3 grid of growing/dividing cells and simulate
// `timesteps` steps on the selected compute target (CPU, OpenCL or CUDA).
// `wrong` is the accumulated-failure flag shared with ExpectArrayNear;
// it is currently untouched because result verification is still a TODO.
inline void RunTest(bool* wrong, OpComputeTarget mode, uint64_t timesteps,
                    uint64_t cells_per_dim) {
  std::cout << "Running simulation on ";
  // Parameter hook: route the simulation to the requested backend.
  auto set_param = [&](auto* param) {
    switch (mode) {
      case kCpu:
        std::cout << "CPU (" << omp_get_max_threads() << " threads)\n";
        break;
      case kOpenCl:
        std::cout << "GPU (OpenCL)\n";
        param->compute_target_ = "opencl";
        break;
      case kCuda:
        std::cout << "GPU (CUDA)\n";
        param->compute_target_ = "cuda";
        break;
    }
  };

  Simulation simulation("cell_division_gpu", set_param);
  auto* rm = simulation.GetResourceManager();
  rm->Clear();

// We need to give every test the same seed for the RNG, because in the cell
// division, random numbers are used. Within a single executable these numbers
// vary. Also within the threads this needs to be enforced
#pragma omp parallel
  simulation.GetRandom()->SetSeed(1);

  // Factory for one cell at `position` with fixed physical parameters and
  // a grow-and-divide behavior.
  auto construct = [](const Double3& position) {
    auto* cell = new Cell(position);
    cell->SetDiameter(30);
    cell->SetAdherence(0.4);
    cell->SetMass(1.0);
    cell->AddBiologyModule(new GrowDivide(30.05, 5000, {gAllEventIds}));
    return cell;
  };

  // Lay the cells out on a regular grid with 20-unit spacing.
  for (size_t x = 0; x < cells_per_dim; x++) {
    double x_pos = x * 20.0;
    for (size_t y = 0; y < cells_per_dim; y++) {
      double y_pos = y * 20.0;
      for (size_t z = 0; z < cells_per_dim; z++) {
        auto new_simulation_object = construct({x_pos, y_pos, z * 20.0});
        rm->push_back(new_simulation_object);
      }
    }
  }

  {
    Timing timer("Execution time");
    simulation.GetScheduler()->Simulate(timesteps);
  }

  // TODO: add verification of results
}
// Entry point: parse command-line options and run the cell-division test
// on every requested backend. Returns 0 on success, 1 if any backend
// produced a wrong result (process exit-code convention).
//
// Fix: `wrong` was initialized to true and the function returned !wrong,
// so it reported success unconditionally even if RunTest flagged an
// error. It now starts false (RunTest only ever sets it true) and is
// translated to the conventional exit status. Today's behavior is
// unchanged because verification inside RunTest is still a TODO.
inline int Simulate(int argc, const char** argv) {
  auto options = CommandLineOptions(argc, argv);
  options.AddOption<bool>("verify", "false");
  options.AddOption<uint64_t>("cells-per-dim", "64");
  options.AddOption<uint64_t>("timesteps", "5");

  uint64_t cells_per_dim = options.Get<uint64_t>("cells-per-dim");
  uint64_t timesteps = options.Get<uint64_t>("timesteps");

  bool wrong = false;
  bool is_opencl = options.Get<bool>("opencl");
  bool is_cuda = options.Get<bool>("cuda");

  // TODO(ahmad): after Trello card ("Fix inconsistency in cell state due to
  // direct updates in Biology Modules") enable multithreading, and adjust
  // results if necessary
  // omp_set_num_threads(1);

  if (!is_cuda && !is_opencl) {
    // Run CPU version
    RunTest(&wrong, kCpu, timesteps, cells_per_dim);
  }

#ifdef USE_CUDA
  if (is_cuda) {
    // Run GPU (CUDA) version
    RunTest(&wrong, kCuda, timesteps, cells_per_dim);
  }
#endif  // USE_CUDA

#ifdef USE_OPENCL
  if (is_opencl) {
    // Run GPU (OpenCL) version
    RunTest(&wrong, kOpenCl, timesteps, cells_per_dim);
  }
#endif  // USE_OPENCL

  return wrong ? 1 : 0;
}
} // namespace bdm
#endif // SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
|
gdcpp.h | /* gdcpp.h
*
* Author: Fabian Meyer
* Created On: 12 Jul 2019
* License: MIT
*/
#ifndef GDCPP_GDCPP_H_
#define GDCPP_GDCPP_H_
#include <Eigen/Geometry>

#include <cassert>
#include <cmath>
#include <functional>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
namespace gdc
{
typedef long int Index;
    /** Functor to compute forward differences.
      * Computes the gradient of the objective f(x) as follows:
      *
      * grad(x) = (f(x + eps) - f(x)) / eps
      *
      * The computation requires len(x) evaluations of the objective,
      * parallelized over threads_ with OpenMP (when enabled). */
    template<typename Scalar>
    class ForwardDifferences
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef std::function<Scalar(const Vector &)> Objective;
    private:
        Scalar eps_;        // perturbation step
        Index threads_;     // OpenMP thread count for the gradient loop
        Objective objective_;
    public:
        /** Default epsilon: sqrt of machine epsilon, the usual choice
          * balancing truncation vs. rounding error. */
        ForwardDifferences()
            : ForwardDifferences(
                std::sqrt(std::numeric_limits<Scalar>::epsilon()))
        { }

        ForwardDifferences(const Scalar eps)
            : eps_(eps), threads_(1), objective_()
        { }

        void setNumericalEpsilon(const Scalar eps)
        {
            eps_ = eps;
        }

        void setThreads(const Index threads)
        {
            threads_ = threads;
        }

        void setObjective(const Objective &objective)
        {
            objective_ = objective;
        }

        /** Fill `gradient` with the forward-difference gradient at xval;
          * fval must be the objective value at xval (reused to save one
          * evaluation per component). */
        void operator()(const Vector &xval,
            const Scalar fval,
            Vector &gradient)
        {
            assert(objective_);

            gradient.resize(xval.size());
            #pragma omp parallel for num_threads(threads_)
            for(Index i = 0; i < xval.size(); ++i)
            {
                Vector xvalN = xval;
                xvalN(i) += eps_;
                Scalar fvalN = objective_(xvalN);
                gradient(i) = (fvalN - fval) / eps_;
            }
        }
    };
/** Functor to compute backward differences.
* Computes the gradient of the objective f(x) as follows:
*
* grad(x) = (f(x) - f(x - eps)) / eps
*
* The computation requires len(x) evaluations of the objective.
*/
template<typename Scalar>
class BackwardDifferences
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &)> Objective;
private:
Scalar eps_;
Index threads_;
Objective objective_;
public:
BackwardDifferences()
: BackwardDifferences(
std::sqrt(std::numeric_limits<Scalar>::epsilon()))
{ }
BackwardDifferences(const Scalar eps)
: eps_(eps), threads_(1), objective_()
{ }
void setNumericalEpsilon(const Scalar eps)
{
eps_ = eps;
}
void setThreads(const Index threads)
{
threads_ = threads;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void operator()(const Vector &xval,
const Scalar fval,
Vector &gradient)
{
assert(objective_);
gradient.resize(xval.size());
#pragma omp parallel for num_threads(threads_)
for(Index i = 0; i < xval.size(); ++i)
{
Vector xvalN = xval;
xvalN(i) -= eps_;
Scalar fvalN = objective_(xvalN);
gradient(i) = (fval - fvalN) / eps_;
}
}
};
/** Functor to compute central differences.
* Computes the gradient of the objective f(x) as follows:
*
* grad(x) = (f(x + 0.5 eps) - f(x - 0.5 eps)) / eps
*
* The computation requires 2 * len(x) evaluations of the objective.
*/
template<typename Scalar>
struct CentralDifferences
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &)> Objective;
private:
Scalar eps_;
Index threads_;
Objective objective_;
public:
CentralDifferences()
: CentralDifferences(
std::sqrt(std::numeric_limits<Scalar>::epsilon()))
{ }
CentralDifferences(const Scalar eps)
: eps_(eps), threads_(1), objective_()
{ }
void setNumericalEpsilon(const Scalar eps)
{
eps_ = eps;
}
void setThreads(const Index threads)
{
threads_ = threads;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void operator()(const Vector &xval,
const Scalar,
Vector &gradient)
{
assert(objective_);
Vector fvals(xval.size() * 2);
#pragma omp parallel for num_threads(threads_)
for(Index i = 0; i < fvals.size(); ++i)
{
Index idx = i / 2;
Vector xvalN = xval;
if(i % 2 == 0)
xvalN(idx) += eps_ / 2;
else
xvalN(idx) -= eps_ / 2;
fvals(i) = objective_(xvalN);
}
gradient.resize(xval.size());
for(Index i = 0; i < xval.size(); ++i)
gradient(i) = (fvals(i * 2) - fvals(i * 2 + 1)) / eps_;
}
};
    /** Dummy callback functor, which does nothing.
      * Always returns true, so optimization is never stopped by the
      * callback. Parameters mirror the (iteration, xval, fval, gradient)
      * signature GradientDescent invokes. */
    template<typename Scalar>
    struct NoCallback
    {
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

        bool operator()(const Index,
            const Vector &,
            const Scalar,
            const Vector &) const
        {
            return true;
        }
    };
    /** Step size functor, which returns a constant step size regardless
      * of the current iterate, objective value or gradient. */
    template<typename Scalar>
    class ConstantStepSize
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef std::function<Scalar(const Vector &, Vector &)> Objective;
        typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;
    private:
        Scalar stepSize_;
    public:
        // Default constant step of 0.7.
        ConstantStepSize()
            : ConstantStepSize(0.7)
        { }

        ConstantStepSize(const Scalar stepSize)
            : stepSize_(stepSize)
        { }

        /** Set the step size returned by this functor.
          * @param stepSize step size returned by functor */
        void setStepSize(const Scalar stepSize)
        {
            stepSize_ = stepSize;
        }

        // No-ops: kept so this functor satisfies the StepSize interface
        // GradientDescent::minimize() wires up.
        void setObjective(const Objective &)
        { }

        void setFiniteDifferences(const FiniteDifferences &)
        { }

        Scalar operator()(const Vector &,
            const Scalar,
            const Vector &)
        {
            return stepSize_;
        }
    };
    /** Step size functor to compute Barzilai-Borwein (BB) steps.
      * The functor can either compute the direct or inverse BB step.
      * The steps are computed as follows:
      *
      * s_k = x_k - x_k-1         k >= 1
      * y_k = grad_k - grad_k-1   k >= 1
      * Direct:  stepSize = (s_k^T * s_k) / (y_k^T * s_k)
      * Inverse: stepSize = (y_k^T * s_k) / (y_k^T * y_k)
      *
      * The very first step is computed as a constant. */
    template<typename Scalar>
    class BarzilaiBorwein
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef std::function<Scalar(const Vector &, Vector &)> Objective;
        typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;

        enum class Method
        {
            Direct,
            Inverse
        };
    private:
        Vector lastXval_;       // x_{k-1}; empty until the first call
        Vector lastGradient_;   // grad_{k-1}
        Method method_;
        Scalar constStep_;      // step used on the very first iteration

        Scalar constantStep() const
        {
            return constStep_;
        }

        /** Direct BB step; falls back to 1 when the curvature
          * denominator vanishes. */
        Scalar directStep(const Vector &xval,
            const Vector &gradient)
        {
            auto sk = xval - lastXval_;
            auto yk = gradient - lastGradient_;
            Scalar num = sk.dot(sk);
            Scalar denom = sk.dot(yk);

            if(denom == 0)
                return 1;
            else
                return std::abs(num / denom);
        }

        /** Inverse BB step; falls back to 1 when the denominator
          * vanishes. */
        Scalar inverseStep(const Vector &xval,
            const Vector &gradient)
        {
            auto sk = xval - lastXval_;
            auto yk = gradient - lastGradient_;
            Scalar num = sk.dot(yk);
            Scalar denom = yk.dot(yk);

            if(denom == 0)
                return 1;
            else
                return std::abs(num / denom);
        }
    public:
        BarzilaiBorwein()
            : BarzilaiBorwein(Method::Direct, 1e-4)
        { }

        BarzilaiBorwein(const Method method, const Scalar constStep)
            : lastXval_(), lastGradient_(), method_(method),
            constStep_(constStep)
        { }

        // No-ops: kept so this functor satisfies the StepSize interface.
        void setObjective(const Objective &)
        { }

        void setFiniteDifferences(const FiniteDifferences &)
        { }

        void setMethod(const Method method)
        {
            method_ = method;
        }

        void setConstStepSize(const Scalar stepSize)
        {
            constStep_ = stepSize;
        }

        /** Return the BB step for the current iterate and remember
          * (xval, gradient) for the next call. First call returns the
          * constant step since no previous iterate exists yet. */
        Scalar operator()(const Vector &xval,
            const Scalar,
            const Vector &gradient)
        {
            Scalar stepSize = 0;
            if(lastXval_.size() == 0)
            {
                stepSize = constStep_;
            }
            else
            {
                switch(method_)
                {
                case Method::Direct:
                    stepSize = directStep(xval, gradient);
                    break;
                case Method::Inverse:
                    stepSize = inverseStep(xval, gradient);
                    break;
                default:
                    assert(false);
                    break;
                }
            }

            lastGradient_ = gradient;
            lastXval_ = xval;

            return stepSize;
        }
    };
/** Step size functor to perform Armijo Linesearch with backtracking.
* The functor iteratively decreases the step size until the following
* conditions are met:
*
* Armijo: f(x - stepSize * grad(x)) <= f(x) - cArmijo * stepSize * grad(x)^T * grad(x)
*
* If either condition does not hold the step size is decreased:
*
* stepSize = decrease * stepSize */
template<typename Scalar>
class ArmijoBacktracking
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &, Vector &)> Objective;
typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;
protected:
Scalar decrease_;
Scalar cArmijo_;
Scalar minStep_;
Scalar maxStep_;
Index maxIt_;
Objective objective_;
FiniteDifferences finiteDifferences_;
Scalar evaluateObjective(const Vector &xval, Vector &gradient)
{
gradient.resize(0);
Scalar fval = objective_(xval, gradient);
if(gradient.size() == 0)
finiteDifferences_(xval, fval, gradient);
return fval;
}
virtual bool computeSecondCondition(const Scalar,
const Scalar,
const Scalar,
const Vector &,
const Vector &)
{
return true;
}
public:
ArmijoBacktracking()
: ArmijoBacktracking(0.8, 1e-4, 1e-12, 1.0, 0)
{ }
ArmijoBacktracking(const Scalar decrease,
const Scalar cArmijo,
const Scalar minStep,
const Scalar maxStep,
const Index iterations)
: decrease_(decrease), cArmijo_(cArmijo), minStep_(minStep),
maxStep_(maxStep), maxIt_(iterations), objective_()
{
assert(decrease > 0);
assert(decrease < 1);
assert(cArmijo > 0);
assert(cArmijo < 0.5);
assert(minStep < maxStep);
}
/** Set the decreasing factor for backtracking.
* Assure that decrease in (0, 1).
* @param decrease decreasing factor */
void setBacktrackingDecrease(const Scalar decrease)
{
assert(decrease > 0);
assert(decrease < 1);
decrease_ = decrease;
}
/** Set the relaxation constant for the Armijo condition (see class
* description).
* Assure cArmijo in (0, 0.5).
* @param cArmijo armijo constant */
void setArmijoConstant(const Scalar cArmijo)
{
assert(cArmijo > 0);
assert(cArmijo < 0.5);
cArmijo_ = cArmijo;
}
/** Set the bounds for the step size during linesearch.
* The final step size is guaranteed to be in [minStep, maxStep].
* @param minStep minimum step size
* @param maxStep maximum step size */
void setStepBounds(const Scalar minStep, const Scalar maxStep)
{
assert(minStep < maxStep);
minStep_ = minStep;
maxStep_ = maxStep;
}
/** Set the maximum number of iterations.
* Set to 0 or negative for infinite iterations.
* @param iterations maximum number of iterations */
void setMaxIterations(const Index iterations)
{
maxIt_ = iterations;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void setFiniteDifferences(const FiniteDifferences &finiteDifferences)
{
finiteDifferences_ = finiteDifferences;
}
Scalar operator()(const Vector &xval,
const Scalar fval,
const Vector &gradient)
{
assert(objective_);
assert(finiteDifferences_);
Scalar stepSize = maxStep_ / decrease_;
Vector gradientN;
Vector xvalN;
Scalar fvalN;
bool armijoCondition = false;
bool secondCondition = false;
Index iterations = 0;
while((maxIt_ <= 0 || iterations < maxIt_) &&
stepSize * decrease_ >= minStep_ &&
!(armijoCondition && secondCondition))
{
stepSize = decrease_ * stepSize;
xvalN = xval - stepSize * gradient;
fvalN = evaluateObjective(xvalN, gradientN);
armijoCondition = fvalN <= fval - cArmijo_ * stepSize * gradient.dot(gradient);
secondCondition = computeSecondCondition(stepSize, fval, fvalN, gradient, gradientN);
++iterations;
}
return stepSize;
}
};
/** Step size functor to perform Wolfe Linesearch with backtracking.
* The functor iteratively decreases the step size until the following
* conditions are met:
*
* Armijo: f(x - stepSize * grad(x)) <= f(x) - cArmijo * stepSize * grad(x)^T * grad(x)
* Wolfe: grad(x)^T grad(x - stepSize * grad(x)) <= cWolfe * grad(x)^T * grad(x)
*
* If either condition does not hold the step size is decreased:
*
* stepSize = decrease * stepSize */
template<typename Scalar>
class WolfeBacktracking : public ArmijoBacktracking<Scalar>
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &, Vector &)> Objective;
typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;
protected:
Scalar cWolfe_;
virtual bool computeSecondCondition(const Scalar,
const Scalar ,
const Scalar ,
const Vector &gradient,
const Vector &gradientN)
{
return gradient.dot(gradientN) <= cWolfe_ * gradient.dot(gradient);
}
public:
WolfeBacktracking()
: WolfeBacktracking(0.8, 1e-4, 0.9, 1e-12, 1.0, 0)
{ }
WolfeBacktracking(const Scalar decrease,
const Scalar cArmijo,
const Scalar cWolfe,
const Scalar minStep,
const Scalar maxStep,
const Index iterations)
: ArmijoBacktracking<Scalar>(decrease, cArmijo, minStep, maxStep,
iterations),cWolfe_(cWolfe)
{
assert(cWolfe < 1);
assert(cArmijo < cWolfe);
}
/** Set the wolfe constants for Armijo and Wolfe condition (see class
* description).
* Assure that c1 < c2 < 1 and c1 in (0, 0.5).
* @param c1 armijo constant
* @param c2 wolfe constant */
void setWolfeConstant(const Scalar cWolfe)
{
assert(cWolfe < 1);
cWolfe_ = cWolfe;
}
};
    /** Step size functor which searches for a step that reduces the function
      * value.
      * The functor iteratively decreases the step size until the following
      * condition is met:
      *
      * f(x - stepSize * grad) < f(x)
      *
      * If this condition does not hold the step size is decreased:
      *
      * stepSize = decrease * stepSize
      *
      * This functor does not require to compute any gradients and does not use
      * finite differences. */
    template<typename Scalar>
    class DecreaseBacktracking
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef std::function<Scalar(const Vector &, Vector &)> Objective;
        typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;
    private:
        Scalar decrease_;   // backtracking shrink factor, expected in (0, 1)
        Scalar minStep_;
        Scalar maxStep_;
        Index maxIt_;       // 0 or negative => unlimited iterations
        Objective objective_;
    public:
        DecreaseBacktracking()
            : DecreaseBacktracking(0.8, 1e-12, 1.0, 0)
        { }

        DecreaseBacktracking(const Scalar decrease,
            const Scalar minStep,
            const Scalar maxStep,
            const Index iterations)
            : decrease_(decrease), minStep_(minStep),
            maxStep_(maxStep), maxIt_(iterations), objective_()
        { }

        /** Set the decreasing factor for backtracking.
          * Assure that decrease in (0, 1).
          * @param decrease decreasing factor */
        void setBacktrackingDecrease(const Scalar decrease)
        {
            decrease_ = decrease;
        }

        /** Set the bounds for the step size during linesearch.
          * The final step size is guaranteed to be in [minStep, maxStep].
          * @param minStep minimum step size
          * @param maxStep maximum step size */
        void setStepBounds(const Scalar minStep, const Scalar maxStep)
        {
            assert(minStep < maxStep);
            minStep_ = minStep;
            maxStep_ = maxStep;
        }

        /** Set the maximum number of iterations.
          * Set to 0 or negative for infinite iterations.
          * @param iterations maximum number of iterations */
        void setMaxIterations(const Index iterations)
        {
            maxIt_ = iterations;
        }

        void setObjective(const Objective &objective)
        {
            objective_ = objective;
        }

        // No-op: this strategy never needs numerical gradients.
        void setFiniteDifferences(const FiniteDifferences &)
        { }

        /** Backtrack from maxStep_ until the trial point improves on
          * fval, the step hits minStep_, or the iteration cap is
          * reached. Returns the accepted step size. */
        Scalar operator()(const Vector &xval,
            const Scalar fval,
            const Vector &gradient)
        {
            assert(objective_);

            // start one decrease above maxStep_ so the first loop pass
            // evaluates exactly maxStep_
            Scalar stepSize = maxStep_ / decrease_;
            Vector xvalN;
            Vector gradientN;
            Scalar fvalN;
            bool improvement = false;

            Index iterations = 0;
            while((maxIt_ <= 0 || iterations < maxIt_) &&
                stepSize * decrease_ >= minStep_ &&
                !improvement)
            {
                stepSize = decrease_ * stepSize;
                xvalN = xval - stepSize * gradient;
                fvalN = objective_(xvalN, gradientN);

                improvement = fvalN < fval;

                ++iterations;
            }

            return stepSize;
        }
    };
    /** Gradient descent driver with optional momentum.
      * Template policies: Objective computes f (and optionally its
      * gradient), StepSize picks the step length each iteration,
      * Callback can observe/stop the run, FiniteDifferences supplies a
      * numerical gradient whenever the objective does not return one. */
    template<typename Scalar,
        typename Objective,
        typename StepSize=BarzilaiBorwein<Scalar>,
        typename Callback=NoCallback<Scalar>,
        typename FiniteDifferences=CentralDifferences<Scalar>>
    class GradientDescent
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

        struct Result
        {
            Index iterations;   // iterations actually performed
            bool converged;     // true if a length criterion was met
            Scalar fval;        // objective value at xval
            Vector xval;        // final iterate
        };
    protected:
        Index maxIt_;               // 0 or negative => unlimited
        Scalar minGradientLen_;     // stop when ||grad|| drops below this
        Scalar minStepLen_;         // stop when ||step|| drops below this
        Scalar momentum_;           // in [0, 1); 0 disables momentum
        Index verbosity_;           // 0 silent, higher => more per-iteration output
        Objective objective_;
        StepSize stepSize_;
        Callback callback_;
        FiniteDifferences finiteDifferences_;

        /** Evaluate the objective; if it did not fill in a gradient,
          * compute one by finite differences. */
        Scalar evaluateObjective(const Vector &xval, Vector &gradient)
        {
            gradient.resize(0);
            Scalar fval = objective_(xval, gradient);
            if(gradient.size() == 0)
                finiteDifferences_(xval, fval, gradient);
            return fval;
        }

        /** Render a vector as "[ ... ]" with fixed-point, width-10
          * fields, for the verbose log. */
        std::string vector2str(const Vector &vec) const
        {
            std::stringstream ss1;
            ss1 << std::fixed << std::showpoint << std::setprecision(6);
            std::stringstream ss2;
            ss2 << '[';
            for(Index i = 0; i < vec.size(); ++i)
            {
                ss1 << vec(i);
                ss2 << std::setfill(' ') << std::setw(10) << ss1.str();
                if(i != vec.size() - 1)
                    ss2 << ' ';
                ss1.str(""); // reset content, keep format flags
            }
            ss2 << ']';

            return ss2.str();
        }

    public:

        GradientDescent()
            : maxIt_(0), minGradientLen_(static_cast<Scalar>(1e-9)),
            minStepLen_(static_cast<Scalar>(1e-9)), momentum_(0),
            verbosity_(0), objective_(), stepSize_(), callback_(),
            finiteDifferences_()
        {

        }

        ~GradientDescent()
        {

        }

        void setThreads(const Index threads)
        {
            finiteDifferences_.setThreads(threads);
        }

        void setNumericalEpsilon(const Scalar eps)
        {
            finiteDifferences_.setNumericalEpsilon(eps);
        }

        /** Set the maximum number of iterations; 0 or negative means
          * unlimited. */
        void setMaxIterations(const Index iterations)
        {
            maxIt_ = iterations;
        }

        void setObjective(const Objective &objective)
        {
            objective_ = objective;
        }

        void setCallback(const Callback &callback)
        {
            callback_ = callback;
        }

        void setMinGradientLength(const Scalar gradientLen)
        {
            minGradientLen_ = gradientLen;
        }

        void setMinStepLength(const Scalar stepLen)
        {
            minStepLen_ = stepLen;
        }

        void setStepSize(const StepSize stepSize)
        {
            stepSize_ = stepSize;
        }

        void setMomentum(const Scalar momentum)
        {
            momentum_ = momentum;
        }

        void setVerbosity(const Index verbosity)
        {
            verbosity_ = verbosity;
        }

        /** Run gradient descent from initialGuess until an iteration,
          * gradient-length, or step-length limit is hit, or the callback
          * returns false. Note: the step computed in iteration k is
          * applied at the start of iteration k+1. */
        Result minimize(const Vector &initialGuess)
        {
            // wire the policy objects to this instance's objective
            finiteDifferences_.setObjective(
                [this](const Vector &xval)
                { Vector tmp; return this->objective_(xval, tmp); });
            stepSize_.setObjective(
                [this](const Vector &xval, Vector &gradient)
                { return this->objective_(xval, gradient); });
            stepSize_.setFiniteDifferences(
                [this](const Vector &xval, const Scalar fval, Vector &gradient)
                { this->finiteDifferences_(xval, fval, gradient); });

            Vector xval = initialGuess;
            Vector gradient;
            Scalar fval;
            // seed the lengths above their thresholds so the loop runs
            // at least once
            Scalar gradientLen = minGradientLen_ + 1;
            Scalar stepSize;
            Vector step = Vector::Zero(xval.size());
            Scalar stepLen = minStepLen_ + 1;
            bool callbackResult = true;

            Index iterations = 0;
            while((maxIt_ <= 0 || iterations < maxIt_) &&
                gradientLen >= minGradientLen_ &&
                stepLen >= minStepLen_
                && callbackResult)
            {
                xval -= step;
                fval = evaluateObjective(xval, gradient);
                gradientLen = gradient.norm();

                // update step according to step size and momentum
                stepSize = stepSize_(xval, fval, gradient);
                step = momentum_ * step + (1 - momentum_) * stepSize * gradient;
                stepLen = step.norm();

                // evaluate callback an save its result
                callbackResult = callback_(iterations, xval, fval, gradient);

                if(verbosity_ > 0)
                {
                    std::stringstream ss;
                    ss << "it=" << std::setfill('0')
                        << std::setw(4) << iterations
                        << std::fixed << std::showpoint << std::setprecision(6)
                        << " gradlen=" << gradientLen
                        << " stepsize=" << stepSize
                        << " steplen=" << stepLen;

                    if(verbosity_ > 2)
                        ss << " callback=" << (callbackResult ? "true" : "false");

                    ss << " fval=" << fval;

                    if(verbosity_ > 1)
                        ss << " xval=" << vector2str(xval);

                    if(verbosity_ > 2)
                        ss << " gradient=" << vector2str(gradient);

                    if(verbosity_ > 3)
                        ss << " step=" << vector2str(step);

                    std::cout << ss.str() << std::endl;
                }

                ++iterations;
            }

            Result result;
            result.xval = xval;
            result.fval = fval;
            result.iterations = iterations;
            result.converged = gradientLen < minGradientLen_ ||
                stepLen < minStepLen_;

            return result;
        }
    };
}
#endif
|
zgelqf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes tile LQ factorization of a complex m-by-n matrix A.
* The factorization has the form
* \f[ A = L \times Q \f],
 *  where L is a lower trapezoidal matrix with a positive diagonal and Q is a
 *  matrix with orthonormal rows.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower
* triangular if M <= N); the elements above the diagonal represent
* the unitary matrix Q as a product of elementary reflectors, stored
* by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_zgelqs
* to solve the system of equations.
 *          The matrix T is allocated inside this function and must be
 *          destroyed with plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgelqf
* @sa plasma_cgelqf
* @sa plasma_dgelqf
* @sa plasma_sgelqf
* @sa plasma_zgelqs
*
******************************************************************************/
int plasma_zgelqf(int m, int n,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    int householder_mode = plasma->householder_mode;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Release the tile matrix created above so the error path does not leak.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Undo everything created so far before bailing out.
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call the tile async function.
        plasma_omp_zgelqf(A, *T, work, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes the tile LQ factorization of a matrix.
* Non-blocking tile version of plasma_zgelqf().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_zgelqs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For LQ factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgelqf
* @sa plasma_omp_cgelqf
* @sa plasma_omp_dgelqf
* @sa plasma_omp_sgelqf
* @sa plasma_omp_zgelqs
*
******************************************************************************/
void plasma_omp_zgelqf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check the sequence and request handles first: plasma_request_fail()
    // dereferences both of them, so it must not be invoked while either
    // pointer is NULL (the previous order of checks did exactly that).
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function, dispatching on the Householder mode.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzgelqf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pzgelqf(A, T, work, sequence, request);
    }
}
|
ten_tusscher_2004_epi_S2_16.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_16.h"
// Fills in the requested cell-model metadata: the resting potential
// (INITIAL_V) and/or the number of ODE state equations (NEQ).
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);

    if (get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
    if (get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initializes the state vector of one cell.
// Instead of the textbook ten Tusscher 2004 resting values, this variant
// loads precomputed steady-state values (Elnaz's) for the 17 states:
// V, m, h, j, xr1, xr2, xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki.
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    static const real steady_state[] = {-86.5934020417845,0.00128316160896992,0.780329095939604,0.780220566229028,0.000174014290922046,0.485351964461050,0.00293503332848286,0.999998357040599,1.92538423669596e-08,1.88473554467734e-05,0.999772914410323,1.00703682498466,0.999994463032827,4.65490502991951e-05,0.633301730023318,9.92651972626448,139.581508364500};

    for (uint32_t eq = 0; eq < NEQ; eq++) {
        sv[eq] = steady_state[eq];
    }
}
// Advances every requested cell by num_steps explicit steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP;
// sv_id is declared inside the loop body and is therefore thread-private.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    #pragma omp parallel for
    for (int cell = 0; cell < num_cells_to_solve; cell++) {
        // With an explicit cell list, map the loop index through it;
        // otherwise cells are solved in natural order.
        uint32_t sv_id = cells_to_solve ? cells_to_solve[cell] : (uint32_t) cell;

        for (int step = 0; step < num_steps; ++step) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[cell]);
        }
    }
}
// Performs a single explicit time step for one cell: copies the current
// state, evaluates the model update (RHS_cpu returns the new state, not a
// derivative), and writes the result back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);

    real state[NEQ];
    real next_state[NEQ];

    for (int k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }

    RHS_cpu(state, next_state, stim_current, dt);

    for (int k = 0; k < NEQ; k++) {
        sv[k] = next_state[k];
    }
}
/**
 * One update step of the ten Tusscher 2004 epicardial cell model.
 *
 * Despite the name, rDY_ receives the UPDATED state, not time derivatives:
 * gating variables are advanced with the Rush-Larsen scheme
 * (x_new = x_inf - (x_inf - x)*exp(-dt/tau_x)), while the membrane voltage
 * and the ionic concentrations are advanced with forward Euler using dt.
 *
 * @param sv            input state vector (17 entries, indices listed below).
 * @param rDY_          output: the state after one step of size dt.
 * @param stim_current  external stimulus current added to the total current.
 * @param dt            time step (also used inside the gate updates).
 */
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Cell-specific conductance/rate set that OVERWRITES the defaults above
    // (this is what makes this variant "S2_16").
    real parameters []={14.1156251262174,0.000165810218046704,0.000133773186453739,0.000479110202185425,0.219374677494434,0.138025575941737,0.145074841732899,4.49439177041867,0.0150636017584010,1.81028903193328,1088.66028342185,0.000575512207306525,0.338168828687625,0.0190582202645448,0.00349417833561414,4.08648025582987e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, concentration increments,
    // and steady-state/time-constant values for every gate.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    // Precomputed factors reused several times below.
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    // Cai and CaSR are advanced analytically against their buffers
    // (quadratic solve), Nai and Ki with plain forward Euler.
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates use different rate formulas above/below -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient outward current gates; the formulas differ per cell type,
    // selected at compile time (EPI/ENDO/MCELL).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may only decrease while the membrane is depolarized
    // (above -37 mV): reject increases by restoring the previous value.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total ionic current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
/**
 * Returns the number of bytes needed for the SQP options struct, including
 * the options of every submodule: QP solver, regularization, and the
 * per-stage dynamics, cost and constraints modules.
 * The layout mirrors ocp_nlp_sqp_opts_assign().
 */
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    int bytes = sizeof(ocp_nlp_sqp_opts);

    // QP solver and regularization options
    bytes += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
    bytes += config->regularize->opts_calculate_size();

    // dynamics: pointer array plus per-stage options (stages 0 .. N-1)
    bytes += N * sizeof(void *);
    for (int stage = 0; stage < N; stage++)
        bytes += dynamics[stage]->opts_calculate_size(dynamics[stage], dims->dynamics[stage]);

    // cost: pointer array plus per-stage options (stages 0 .. N)
    bytes += (N + 1) * sizeof(void *);
    for (int stage = 0; stage <= N; stage++)
        bytes += cost[stage]->opts_calculate_size(cost[stage], dims->cost[stage]);

    // constraints: pointer array plus per-stage options (stages 0 .. N)
    bytes += (N + 1) * sizeof(void *);
    for (int stage = 0; stage <= N; stage++)
        bytes += constraints[stage]->opts_calculate_size(constraints[stage], dims->constraints[stage]);

    return bytes;
}
/**
 * Carves the SQP options struct and all submodule options out of raw_memory.
 * Memory is assigned sequentially; the order and sizes here must match
 * ocp_nlp_sqp_opts_calculate_size() exactly.
 *
 * @return pointer to the initialized ocp_nlp_sqp_opts inside raw_memory.
 */
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    // running pointer into the raw memory block
    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    // QP solver options
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    // regularization options
    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();

    // dynamics: pointer array, then each stage's options (stages 0 .. N-1)
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }
    // cost: pointer array, then each stage's options (stages 0 .. N)
    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }
    // constraints: pointer array, then each stage's options (stages 0 .. N)
    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] =
            constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    // sanity check: we must not have walked past the computed size
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}
/**
 * Fills the SQP options (and every submodule's options) with default values:
 * 20 iterations max, all tolerances 1e-8, full steps, workspace reuse on.
 * The SQP tolerances are also pushed down into the QP solver options.
 */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int N = dims->N;

    // top-level SQP options
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq   = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    opts->num_threads = ACADOS_NUM_THREADS;
#endif
    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;
    opts->step_length = 1.0;

    // QP solver defaults, then overwrite its tolerances with the SQP ones
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    // regularization defaults
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    // per-stage submodule defaults
    for (int stage = 0; stage < N; stage++)
        dynamics[stage]->opts_initialize_default(dynamics[stage], dims->dynamics[stage],
                                                 opts->dynamics[stage]);

    for (int stage = 0; stage <= N; stage++)
        cost[stage]->opts_initialize_default(cost[stage], dims->cost[stage], opts->cost[stage]);

    for (int stage = 0; stage <= N; stage++)
        constraints[stage]->opts_initialize_default(constraints[stage], dims->constraints[stage],
                                                    opts->constraints[stage]);

    return;
}
/**
 * Propagates option updates into every submodule (QP solver and the
 * per-stage dynamics, cost and constraints modules).
 */
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    // QP solver
    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics (stages 0 .. N-1)
    for (int stage = 0; stage < N; stage++)
        dynamics[stage]->opts_update(dynamics[stage], dims->dynamics[stage], opts->dynamics[stage]);

    // cost (stages 0 .. N)
    for (int stage = 0; stage <= N; stage++)
        cost[stage]->opts_update(cost[stage], dims->cost[stage], opts->cost[stage]);

    // constraints (stages 0 .. N)
    for (int stage = 0; stage <= N; stage++)
        constraints[stage]->opts_update(constraints[stage], dims->constraints[stage],
                                        opts->constraints[stage]);

    return;
}
/**
 * Sets a single SQP option by name. Fields prefixed with "qp_" are forwarded
 * to the QP solver module (with the prefix stripped); all other fields are
 * handled here. Unknown fields are a fatal error (prints and exits).
 *
 * @param config_ ocp_nlp_config pointer.
 * @param opts_   ocp_nlp_sqp_opts pointer.
 * @param field   option name, e.g. "max_iter", "tol_stat", "qp_warm_start".
 * @param value   pointer to the new value (type depends on the field).
 */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (the part of field before the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        // Clamp to the buffer capacity: a (malformed) field whose first '_'
        // lies beyond MAX_STR_LEN-1 characters must not overflow 'module'.
        if (module_length > MAX_STR_LEN-1)
            module_length = MAX_STR_LEN-1;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);

        // mirror the warm-start flag locally, it is consulted by the solver
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "reuse_workspace"))
        {
            int* reuse_workspace = (int *) value;
            opts->reuse_workspace = *reuse_workspace;
        }
        else if (!strcmp(field, "num_threads"))
        {
            int* num_threads = (int *) value;
            opts->num_threads = *num_threads;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "exact_hess"))
        {
            int N = config->N;
            // cost
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
            // dynamics
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints TODO disabled for now as prevents convergence !!!
            // for (ii=0; ii<=N; ii++)
            //     config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "step_length"))
        {
            double* step_length = (double *) value;
            opts->step_length = *step_length;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else
        {
            printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
            exit(1);
        }
    }

    return;
}
/**
 * Forwards an option to the dynamics module of the given stage.
 */
void ocp_nlp_sqp_dynamics_opts_set(void *config_, void *opts_, int stage,
                                   const char *field, void *value)
{
    ocp_nlp_config *nlp_config = config_;
    ocp_nlp_sqp_opts *sqp_opts = opts_;

    ocp_nlp_dynamics_config *dyn_config = nlp_config->dynamics[stage];
    dyn_config->opts_set(dyn_config, sqp_opts->dynamics[stage], field, value);

    return;
}
/**
 * Forwards an option to the cost module of the given stage.
 */
void ocp_nlp_sqp_cost_opts_set(void *config_, void *opts_, int stage,
                               const char *field, void *value)
{
    ocp_nlp_config *nlp_config = config_;
    ocp_nlp_sqp_opts *sqp_opts = opts_;

    ocp_nlp_cost_config *cost_config = nlp_config->cost[stage];
    cost_config->opts_set(cost_config, sqp_opts->cost[stage], field, value);

    return;
}
/**
 * Forwards an option to the constraints module of the given stage.
 */
void ocp_nlp_sqp_constraints_opts_set(void *config_, void *opts_, int stage,
                                      const char *field, void *value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_nlp_constraints_config *constraints_config = config->constraints[stage];

    // Pass field through unmodified; the (char *) cast that used to be here
    // was unnecessary and inconsistent with the dynamics/cost setters.
    constraints_config->opts_set(constraints_config, opts->constraints[stage], field, value);

    return;
}
/************************************************
* memory
************************************************/
/**
 * Returns the number of bytes needed for the SQP solver memory: QP in/out,
 * QP solver and regularization memory, per-stage submodule memory, NLP
 * residuals, the iteration statistics table, and blasfeo storage for the
 * z-sensitivities (dzduxt) and algebraic variables (z_alg).
 * The layout and alignment padding here must stay in sync with
 * ocp_nlp_sqp_memory_assign().
 */
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    // dimensions used for the blasfeo storage below
    int N = dims->N;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_memory);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics: pointer array plus per-stage memory (stages 0 .. N-1)
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                    opts->dynamics[ii]);
    }

    // cost: pointer array plus per-stage memory (stages 0 .. N)
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: pointer array plus per-stage memory (stages 0 .. N)
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                       opts->constraints[ii]);
    }

    // nlp res
    size += ocp_nlp_res_calculate_size(dims);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims);

    // stat: one row per iteration (plus initial point), 6 columns,
    // 4 extra columns when external QP residuals are requested
    int stat_m = opts->max_iter+1;
    int stat_n = 6;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    // dzduxt: (nu+nx) x nz sensitivity matrix per stage
    size += (N+1)*sizeof(struct blasfeo_dmat);
    for (ii=0; ii<=N; ii++)
        size += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);

    // z_alg: nz vector per stage
    size += (N+1)*sizeof(struct blasfeo_dvec);
    for (ii=0; ii<=N; ii++)
        size += blasfeo_memsize_dvec(nz[ii]);

    size += 1*8;   // blasfeo_str align
    size += 1*64;  // blasfeo_mem align

    size += 8;  // initial align

    //    make_int_multiple_of(64, &size);

    return size;
}
/**
 * Carves the SQP solver memory out of raw_memory.
 * Memory is assigned sequentially with explicit alignment steps; the order,
 * sizes and padding must match ocp_nlp_sqp_memory_calculate_size() exactly.
 *
 * @return pointer to the initialized ocp_nlp_sqp_memory inside raw_memory.
 */
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // running pointer into the raw memory block
    char *c_ptr = (char *) raw_memory;

    // dimensions used for the blasfeo storage below
    int N = dims->N;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);

    // qp in
    mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // QP solver
    mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims);

    // dynamics: pointer array, then each stage's memory (stages 0 .. N-1)
    mem->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii],
                                                        opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                     opts->dynamics[ii]);
    }

    // cost: pointer array, then each stage's memory (stages 0 .. N)
    mem->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: pointer array, then each stage's memory (stages 0 .. N)
    mem->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(
            constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                        opts->constraints[ii]);
    }

    // stat: iteration statistics table, stat_m rows x stat_n columns
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    // blasfeo_str align
    align_char_to(8, &c_ptr);

    // dzduxt (struct headers)
    mem->dzduxt = (struct blasfeo_dmat *) c_ptr;
    c_ptr += (N+1)*sizeof(struct blasfeo_dmat);

    // z_alg (struct headers)
    mem->z_alg = (struct blasfeo_dvec *) c_ptr;
    c_ptr += (N+1)*sizeof(struct blasfeo_dvec);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // dzduxt (matrix payloads, (nu+nx) x nz per stage)
    for (int ii=0; ii<=N; ii++)
    {
        blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, c_ptr);
        c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
    }

    // z_alg (vector payloads, nz per stage)
    for (int ii=0; ii<=N; ii++)
    {
        blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }

    mem->status = ACADOS_READY;

    // sanity check: we must not have walked past the computed size
    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}
/************************************************
* workspace
************************************************/
/* Computes the number of bytes needed for the SQP workspace.
 * When opts->reuse_workspace is set and OpenMP is NOT enabled, the
 * sub-module workspaces (QP solver, dynamics, cost, constraints) are used
 * one at a time, so only the LARGEST of them is counted (max, not sum).
 * With OpenMP enabled, or without reuse, each sub-module gets its own
 * region and the sizes are summed. Must stay in sync with
 * ocp_nlp_sqp_cast_workspace() below. */
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int ii;
    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    int size = 0;
    int size_tmp = 0;   // running max of sub-module workspace sizes (reuse case)
    int tmp;
    // sqp
    size += sizeof(ocp_nlp_sqp_work);
    // tmp qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // tmp qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    // array of pointers
    // cost
    size += (N + 1) * sizeof(void *);
    // dynamics
    size += N * sizeof(void *);
    // constraints
    size += (N + 1) * sizeof(void *);
    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }
    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP the stages run concurrently, so workspaces cannot
        // overlap: sum everything despite the reuse flag
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                    opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                           opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                              dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial execution: sub-module workspaces can share one region,
        // so only the maximum size is needed
        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        size += size_tmp;
#endif
    }
    else
    {
        // no reuse: each sub-module gets its own workspace
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                    opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                           opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                              dims->constraints[ii], opts->constraints[ii]);
        }
    }
    return size;
}
/* Distributes the raw workspace block (starting at `work`) among the SQP
 * sub-modules. The layout must match ocp_nlp_sqp_workspace_calculate_size()
 * byte for byte. In the reuse_workspace/non-OpenMP case all sub-module
 * workspace pointers deliberately ALIAS the same region (they are used
 * strictly one at a time), and c_ptr advances only by the maximum size. */
static void ocp_nlp_sqp_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_work *work,
                                       ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_opts *opts)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    // sqp work struct itself comes first
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_work);
    // tmp qp in (scratch QP, used e.g. for parametric sensitivities)
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    // array of pointers
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }
    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // concurrent stages: every sub-module needs a distinct region
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                     opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                            opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                               dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial: all pointers share one region sized to the largest user;
        // c_ptr is advanced once, by size_tmp, at the end
        int size_tmp = 0;
        int tmp;
        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        c_ptr += size_tmp;
#endif
    }
    else
    {
        // no reuse: consecutive, non-overlapping regions
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                     opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                            opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                               dims->constraints[ii], opts->constraints[ii]);
        }
    }
    // consumed bytes must fit within the computed workspace size
    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
    return;
}
/************************************************
* functions
************************************************/
/* Calls the initialize hook of every stage-wise sub-module (cost and
 * constraints on stages 0..N, dynamics on stages 0..N-1). Stages are
 * independent, so the loop is parallelized when OpenMP is available. */
static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                          ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
                          ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int stage;
    int N = dims->N;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (stage = 0; stage <= N; stage++)
    {
        // cost module
        cost[stage]->initialize(cost[stage], dims->cost[stage], nlp_in->cost[stage],
                                opts->cost[stage], mem->cost[stage], work->cost[stage]);
        // dynamics module (no dynamics on the terminal stage)
        if (stage < N)
            dynamics[stage]->initialize(dynamics[stage], dims->dynamics[stage],
                                        nlp_in->dynamics[stage], opts->dynamics[stage],
                                        mem->dynamics[stage], work->dynamics[stage]);
        // constraints module
        constraints[stage]->initialize(constraints[stage], dims->constraints[stage],
                                       nlp_in->constraints[stage], opts->constraints[stage],
                                       mem->constraints[stage], work->constraints[stage]);
    }
}
/* Evaluates the stage-wise multiple-shooting Lagrangian: lets each module
 * (dynamics, cost, constraints) update its contribution to the QP matrices,
 * then gathers the per-module gradients/residuals into the shared NLP memory
 * (cost_grad, dyn_fun, dyn_adj, ineq_fun, ineq_adj).
 *
 * Fix vs. previous revision: removed a trailing `for` loop over the stages
 * whose body was entirely commented out -- it executed N+1 empty iterations.
 * The TODO it carried is preserved below. */
static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                                         ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
                                         ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;
    int i;
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    /* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0; the modules below add their contributions
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
        // dynamics (stages 0..N-1 only)
        if (i < N)
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
                nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i],
            opts->cost[i], mem->cost[i], work->cost[i]);
        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
            nlp_in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }
    /* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0);
        // nlp mem: dyn_fun (dynamics residual, nx[i+1] entries)
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun
                = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0);
        }
        // nlp mem: dyn_adj (zeroed on the terminal stage, which has no dynamics)
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0);
        }
        else
        {
            blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0);
        }
        // add the preceding stage's adjoint contribution w.r.t. x_i
        // (read-only access to stage i-1 memory: safe under the parallel for)
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i],
                          nlp_mem->dyn_adj+i, nu[i]);
        }
        // nlp mem: ineq_fun
        struct blasfeo_dvec *ineq_fun =
            config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0);
        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj =
            config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0);
    }
    // TODO(rien): where should the update happen??? move to qp update ???
    // TODO(all): fix and move where appropriate: for inexact integration schemes,
    // the simulation output gradient would need to be added to cost_grad, e.g.
    //   BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
    return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
/* Refreshes the QP right-hand-side vectors from the latest NLP evaluation:
 * gradient -> g, dynamics residual -> b, inequality residual -> d.
 * (Step in primal variables, absolute values for the dual variables.) */
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                                  ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
                                  ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
    int stage;
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *ni = dims->ni;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (stage = 0; stage <= N; stage++)
    {
        // cost gradient -> QP linear term g
        blasfeo_dveccp(nv[stage], nlp_mem->cost_grad + stage, 0, mem->qp_in->rqz + stage, 0);
        // dynamics residual -> QP equality rhs b (no dynamics on terminal stage)
        if (stage < N)
            blasfeo_dveccp(nx[stage + 1], nlp_mem->dyn_fun + stage, 0, mem->qp_in->b + stage, 0);
        // inequality residual -> QP bounds d
        blasfeo_dveccp(2 * ni[stage], nlp_mem->ineq_fun + stage, 0, mem->qp_in->d + stage, 0);
    }
}
/* Applies the (fixed-length) SQP step: primal variables take a damped QP
 * step, dual variables and slacks are blended as (1-step)*old + step*qp,
 * and algebraic variables z get a first-order update through the stored
 * dz/d(u,x) sensitivities. */
static void sqp_update_variables(void *config_, ocp_nlp_dims *dims, ocp_nlp_out *nlp_out,
                                 ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
                                 ocp_nlp_sqp_work *work)
{
    int stage;
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;
    double step = opts->step_length;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (stage = 0; stage <= N; stage++)
    {
        // primal step: ux += step * qp_ux
        blasfeo_daxpy(nv[stage], step, mem->qp_out->ux + stage, 0, nlp_out->ux + stage, 0,
                      nlp_out->ux + stage, 0);
        // equality multipliers pi (stages 0..N-1): convex combination
        if (stage < N)
        {
            blasfeo_dvecsc(nx[stage+1], 1.0-step, nlp_out->pi + stage, 0);
            blasfeo_daxpy(nx[stage+1], step, mem->qp_out->pi + stage, 0, nlp_out->pi + stage, 0,
                          nlp_out->pi + stage, 0);
        }
        // inequality multipliers lam: convex combination
        blasfeo_dvecsc(2*ni[stage], 1.0-step, nlp_out->lam + stage, 0);
        blasfeo_daxpy(2*ni[stage], step, mem->qp_out->lam + stage, 0, nlp_out->lam + stage, 0,
                      nlp_out->lam + stage, 0);
        // slack values t: convex combination
        blasfeo_dvecsc(2*ni[stage], 1.0-step, nlp_out->t + stage, 0);
        blasfeo_daxpy(2*ni[stage], step, mem->qp_out->t + stage, 0, nlp_out->t + stage, 0,
                      nlp_out->t + stage, 0);
        // algebraic variables: z = z_alg + step * dzduxt^T * qp_ux
        if (stage < N)
        {
            blasfeo_dgemv_t(nu[stage]+nx[stage], nz[stage], step, mem->dzduxt + stage, 0, 0,
                            mem->qp_out->ux + stage, 0, 1.0, mem->z_alg + stage, 0,
                            nlp_out->z + stage, 0);
        }
    }
}
// Simple fixed-step Gauss-Newton based SQP routine
/* Simple fixed-step Gauss-Newton based SQP routine.
 *
 * Per iteration: linearize the NLP, refresh the QP vectors, check the four
 * residual tolerances (stationarity, equality, inequality, complementarity),
 * regularize the Hessian, solve the QP, correct the duals for the
 * regularization, and take a fixed-length step.
 *
 * Returns mem->status: ACADOS_SUCCESS when all residuals meet their
 * tolerances, ACADOS_QP_FAILURE if the QP solver fails hard, or
 * ACADOS_MAXITER when opts->max_iter iterations are exhausted.
 * Timings (total / linearization / QP / regularization) are accumulated in
 * mem, and per-iteration statistics in mem->stat. */
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;   // timer0: whole solve, timer1: per-phase
    acados_tic(&timer0);
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_work *work = work_;
    // distribute the raw workspace among the sub-modules
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
    // zero timers
    double total_time = 0.0;
    mem->time_qp_sol = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;
    int N = dims->N;
    int ii;
    int qp_iter = 0;
    int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif
    // wire the per-stage module memories to the solver-level buffers
    // (nlp_out variables, qp_in matrices, z/dzduxt) so the modules read and
    // write them in place during the iterations below
    // alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(mem->qp_in->BAbt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(mem->dzduxt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(mem->nlp_mem->sim_guess+ii,
                mem->nlp_mem->set_sim_guess+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->dynamics[ii]);
    }
    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(mem->qp_in->Z + ii, mem->cost[ii]);
    }
    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(mem->qp_in->DCt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(mem->qp_in->idxb[ii], mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(mem->qp_in->idxs[ii], mem->constraints[ii]);
    }
    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, mem->qp_in->RSQrq, mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, mem->qp_in->rqz, mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, mem->qp_in->BAbt, mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, mem->qp_in->b, mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, mem->qp_in->idxb, mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, mem->qp_in->DCt, mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, mem->qp_out->ux, mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, mem->qp_out->pi, mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, mem->qp_out->lam, mem->regularize_mem);
    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }
#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif
    // initialize QP
    initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);
    // main sqp loop
    int sqp_iter = 0;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        // printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
        // if (sqp_iter==2)
        // exit(1);
        // linearizate NLP and update QP matrices
        acados_tic(&timer1);
        linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);
        mem->time_lin += acados_toc(&timer1);
        // update QP rhs for SQP (step prim var, abs dual var)
        sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);
        // compute nlp residuals
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, mem->nlp_mem);
        // inf_norm_res = max of the four residual norms (g, b, d, m)
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_b :
                                    nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_d :
                                    nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_m :
                                    nlp_out->inf_norm_res;
        // save statistics (columns 0..3: residual norms for this iteration)
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
        }
        // exit conditions on residuals (all four must pass)
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // printf("%d sqp iterations\n", sqp_iter);
            // print_ocp_qp_in(mem->qp_in);
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;
            // stop timer
            total_time += acados_toc(&timer0);
            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }
        // regularize Hessian
        acados_tic(&timer1);
        config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);
        // printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
        // print_ocp_qp_in(mem->qp_in);
        // if (sqp_iter==1)
        // exit(1);
        // (typically) no warm start at first iteration
        if (sqp_iter == 0 && !opts->warm_start_first_qp)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &tmp_int);
        }
        // solve qp
        acados_tic(&timer1);
        qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, mem->qp_in, mem->qp_out,
                                        opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
        mem->time_qp_sol += acados_toc(&timer1);
        // compute correct dual solution in case of Hessian regularization
        acados_tic(&timer1);
        config->regularize->correct_dual_sol(config->regularize, dims->regularize,
                                             opts->regularize, mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);
        // restore default warm start (it was disabled above for iteration 0)
        if (sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &opts->qp_warm_start);
        }
        // TODO move into QP solver memory ???
        qp_info *qp_info_;
        ocp_qp_out_get(mem->qp_out, "qp_info", &qp_info_);
        nlp_out->qp_iter = qp_info_->num_iter;
        // printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
        qp_iter = qp_info_->num_iter;
        // save statistics of last qp solver call
        // (columns 4..5 of the NEXT row: qp status and iteration count)
        if (sqp_iter+1 < mem->stat_m)
        {
            mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
            mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
        }
        // compute external QP residuals (for debugging)
        if (opts->ext_qp_res)
        {
            ocp_qp_res_compute(mem->qp_in, mem->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
            // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]);
        }
        // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
        // print_ocp_qp_out(mem->qp_out);
        // if (sqp_iter==1)
        // exit(1);
        // hard QP failure aborts the solve (ACADOS_MAXITER from the QP is tolerated)
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // print_ocp_qp_in(mem->qp_in);
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;
            // stop timer
            total_time += acados_toc(&timer0);
            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;
            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }
        // take the (fixed-length) step in primal and dual variables
        sqp_update_variables(config, dims, nlp_out, opts, mem, work);
        // ocp_nlp_dims_print(nlp_out->dims);
        // ocp_nlp_out_print(nlp_out);
        // exit(1);
        // ??? @rien
        //        for (int_t i = 0; i < N; i++)
        //        {
        //   ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme == NULL)
        //                continue;
        //            opts->sens_adj = (opts->scheme->type != exact);
        //            if (nlp_in->freezeSens) {
        //                // freeze inexact sensitivities after first SQP iteration !!
        //                opts->scheme->freeze = true;
        //            }
        //        }
    }
    // stop timer
    total_time += acados_toc(&timer0);
    // ocp_nlp_out_print(nlp_out);
    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;
    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;
    // printf("%d sqp iterations\n", sqp_iter);
    // print_ocp_qp_in(mem->qp_in);
    // maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;
    return mem->status;
}
/* One-time precomputation before solving: verifies that the slack dimension
 * ns registered in each constraint module matches dims->ns, sets the
 * sampling time "T" in every dynamics model, and runs each dynamics
 * module's precompute hook.
 * Returns ACADOS_SUCCESS, or the first non-success status returned by a
 * dynamics precompute; exits the process on an ns dimension mismatch.
 *
 * Fix: the dimension-mismatch error message previously lacked a trailing
 * newline, so it could be lost in a line-buffered stream on exit(1). */
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                           void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_work *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;
    // TODO(all) add flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii],
                                          "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns with constraint module.\n");
            exit(1);
        }
    }
    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T (sampling time, needed by the integrators)
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute; bail out on the first failure
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
                                                  nlp_in->dynamics[ii], opts->dynamics[ii],
                                                  mem->dynamics[ii], work->dynamics[ii]);
        if (status != ACADOS_SUCCESS) return status;
    }
    return status;
}
/* Evaluates the sensitivity of the NLP solution with respect to a parameter.
 * Only field "ex" (initial state) at stage 0 is supported: a copy of the
 * last QP is given a zero right-hand side except for a unit entry in
 * lbx/ubx at `index`, the QP solver's eval_sens is called, and the
 * resulting directional derivatives are copied into sens_nlp_out.
 * Exits the process for any other field/stage combination. */
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_work *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
    // clone the last solved QP, then zero its entire rhs
    d_ocp_qp_copy_all(mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
    double one = 1.0;
    if ((!strcmp("ex", field)) & (stage==0))
    {
        // unit perturbation of the initial-state bound at `index`
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);
        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;
        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
        exit(1);
    }
    return;
}
void ocp_nlp_sqp_get(void *config_, void *mem_, const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_memory *mem = mem_;
if (!strcmp("sqp_iter", field))
{
int *value = return_value_;
*value = mem->sqp_iter;
}
else if (!strcmp("status", field))
{
int *value = return_value_;
*value = mem->status;
}
else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
{
double *value = return_value_;
*value = mem->time_tot;
}
else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
{
double *value = return_value_;
*value = mem->time_qp_sol;
}
else if (!strcmp("time_lin", field))
{
double *value = return_value_;
*value = mem->time_lin;
}
else if (!strcmp("time_reg", field))
{
double *value = return_value_;
*value = mem->time_reg;
}
else if (!strcmp("nlp_res", field))
{
ocp_nlp_res **value = return_value_;
*value = mem->nlp_res;
}
else if (!strcmp("stat", field))
{
double **value = return_value_;
*value = mem->stat;
}
else if (!strcmp("stat_m", field))
{
int *value = return_value_;
*value = mem->stat_m;
}
else if (!strcmp("stat_n", field))
{
int *value = return_value_;
*value = mem->stat_n;
}
else if (!strcmp("nlp_mem", field))
{
void **value = return_value_;
*value = mem->nlp_mem;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
exit(1);
}
}
/* Populates an ocp_nlp_config vtable with the SQP implementations of the
 * generic NLP-solver interface (options, memory, workspace, evaluate,
 * sensitivities, precompute, getter). */
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;
    config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_opts_update;
    config->opts_set = &ocp_nlp_sqp_opts_set;
    config->dynamics_opts_set = &ocp_nlp_sqp_dynamics_opts_set;
    config->cost_opts_set = &ocp_nlp_sqp_cost_opts_set;
    config->constraints_opts_set = &ocp_nlp_sqp_constraints_opts_set;
    config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
    // main entry point: the fixed-step Gauss-Newton SQP loop
    config->evaluate = &ocp_nlp_sqp;
    config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_precompute;
    config->get = &ocp_nlp_sqp_get;
    return;
}
|
matrixmultiply-ompacc2.c | /*
Naive matrix-matrix multiplication(mmm)
multiple GPUs, standard OpenMP 4.0 directives
By C. Liao
*/
#include <stdio.h>
#include <assert.h>
#include <omp.h>
// Matrix dimensions: C(N x K) = A(N x M) * B(M x K)
#define N 1024
#define M 1024
#define K 1024
#define REAL float
// NOTE(review): file-scope loop counters shared by init/mmm2/verify --
// fragile in a file that also runs OpenMP parallel regions
int i,j,k;
// a, b: inputs; c: offloaded (device) result; c2: host reference result
REAL a[N][M],b[M][K],c[N][K], c2[N][K];
int init();
int mmm();
int mmm2();
int verify();
//#define MAX_GPU_COUNT 4
/* Initialize inputs, run the offloaded multiply (mmm) and the host
 * reference multiply (mmm2), then compare the two results. */
int main(void)
{
    init();
    mmm();
    mmm2();
    return verify();
}
/* Fill a and b with deterministic values and zero both result matrices
 * c (device) and c2 (host reference). Always returns 0.
 * Fix: uses function-local loop counters instead of the file-scope globals
 * i,j -- globals as loop counters are bug-prone in a file that also runs
 * OpenMP parallel regions, and leak their final values to later code. */
int init()
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i][j] = 3.0 * i * j / N / M;
    for (i = 0; i < M; i++)
        for (j = 0; j < K; j++)
            b[i][j] = 5.0 * j * i / N / M;
    for (i = 0; i < N; i++)
        for (j = 0; j < K; j++)
        {
            c[i][j] = 0.0;
            c2[i][j] = 0.0;
        }
    return 0;
}
/*
TODO: try different i,j,k orders
a b e f a*e+ b*g , a*f+ b*h
c d x g h = c*e+ d*g, c*f+ d*h
*/
/* Offloaded matrix multiply: splits the rows of c evenly across GPU_N
 * devices (one OpenMP host thread per device) and runs each slice in an
 * OpenMP `target` region. Currently hard-coded to a single device.
 * Always returns 0. */
int mmm()
{
    int GPU_N , idev;
    int n = N;
    // GPU_N = xomp_get_num_devices();
    GPU_N = 1;   // device count hard-coded to 1 (query is commented out)
    printf("CUDA-capable device count: %i\n", GPU_N);
#if 0
    if (GPU_N > MAX_GPU_COUNT)
    {
        GPU_N = MAX_GPU_COUNT;
    }
    assert (GPU_N>0 && GPU_N<=MAX_GPU_COUNT);
#endif
    // one host thread per device
    omp_set_num_threads(GPU_N);
#pragma omp parallel shared (GPU_N, a, b, c, n) private(idev)
    // for (idev = 0; idev < GPU_N; idev++)
    {
        int tid = omp_get_thread_num();
        // cudaSetDevice(tid);
        // bind this host thread to device `tid` (XOMP runtime helper)
        xomp_set_default_device (tid);
        long size ;     // number of rows this thread computes
        long offset;    // first row this thread computes
#if 0
        int size = n / GPU_N;
        int offset = size * tid;
        if(tid < n%GPU_N)
        {
            size++;
        }
        if(tid >= n%GPU_N)
            offset += n%GPU_N;
        else
            offset += tid;
#endif
        // even row partition of [0, n) among GPU_N workers (XOMP helper)
        XOMP_static_even_divide (0, n, GPU_N, tid, &offset, &size);
        printf("thread %d working on GPU devices %d with size %ld copying data from y_ompacc with offset %ld\n",tid, tid, size,offset);
        int i, j, k;   // local loop counters (the globals must not be shared across threads)
        // map only this thread's row slice of a and c; all of b is needed
#pragma omp target device (tid) map(tofrom:c[offset:size][0:n]), map(to:a[offset:size][0:n],b[0:n][0:n], offset,size,n)
#pragma omp parallel for private(i,j,k) shared (a,b,c, n, offset, size)
        for (i = offset; i < offset + size; i++)
            for (j = 0; j < M; j++)
                for (k = 0; k < K; k++)
                    c[i][j]= c[i][j]+a[i][k]*b[k][j];
    }
    return 0;
}
/* Serial reference multiplication into c2 (same i-j-k order as mmm).
 * Returns 0.
 * Fix: use local loop counters instead of the file-scope i,j,k to avoid
 * clobbering globals shared with the other routines in this file. */
int mmm2()
{
int li, lj, lk;
for (li = 0; li < N; li++)
for (lj = 0; lj < M; lj++)
for (lk = 0; lk < K; lk++)
c2[li][lj] = c2[li][lj] + a[li][lk]*b[lk][lj];
return 0;
}
/* Compare the offloaded result (c) against the serial reference (c2) by
 * summing all elements of each. Prints both sums, aborts via assert on
 * mismatch, returns 0 on success.
 * Fix: the original used assert(sum == sum2) — exact equality between
 * float accumulations produced by different devices/compilers (e.g. FMA
 * contraction on the GPU) is unreliable; compare with a relative tolerance. */
int verify()
{
REAL sum=0.0, sum2=0.0;
int vi, vj; /* local counters; do not clobber the file-scope i,j */
for (vi = 0; vi < N; vi++)
for (vj = 0; vj < K; vj++)
{
sum += c[vi][vj];
sum2 += c2[vi][vj];
}
printf("sum of c[i][j] is %f\n",sum);
printf("sum of c2[i][j] is %f\n",sum2);
{
double diff = (double)sum - (double)sum2;
if (diff < 0.0) diff = -diff;
double ref = (double)sum2;
if (ref < 0.0) ref = -ref;
/* 1e-3 relative tolerance (plus absolute floor) for single precision */
assert (diff <= 1.0e-3 * (ref + 1.0));
}
return 0;
}
|
omp_zherk_batch.c | /**
* @file omp_zherk_batch.c
*
* @brief BBLAS zherk_batch double _Complex routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @precisions normal z -> c
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define COMPLEX
/**
Purpose
-------
<b>zherk_batch</b> is an OpenMP version of zherk_batch.
It performs the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayA[i**H] + beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i],
where alpha[i] and beta[i] are real scalars, each arrayC[i] is an
N[i] by N[i] Hermitian matrix, and each arrayA[i] is an N[i] by K[i] matrix in the first
case and a K[i] by N[i] matrix in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i]
is to be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
arrayC[i] is to be referenced.
- = 'BblasLower' Only the lower triangular part of
arrayC[i] is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**H + beta[i]*arrayC[i].
- = 'BblasConjTrans' arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrix arrayA[i],
and upon entry with trans[i] = 'BblasConjTrans',
K[i] specifies the number of rows of the matrix arrayA[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>complex_16</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>complex_16</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each element arrayC[i] is a pointer to a COMPLEX_16 matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the hermitian
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper tri-
angular part of the updated matrix.
Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the hermitian matrix and the
strictly upper triangular part of arrayC[i] is not refer-
enced. On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
Note that the imaginary parts of the diagonal elements need not be set,
they are assumed to be zero,
and on exit they are set to zero.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith zherk in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
/* OpenMP batched ZHERK. Validates arguments per the batch mode, then runs one
 * cblas_zherk per batch entry inside an OpenMP parallel-for. info[i] receives
 * the per-entry status (BBLAS_SUCCESS or an error code from bblas_macros.h).
 * Fix: in the FIXED-batch LDA error path the original wrote
 * info[first_index] inside the loop instead of info[batch_iter], leaving
 * entries 1..batch_count-1 uninitialized. */
void omp_zherk_batch(
	const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
	const int *N, const int *K, const double *alpha,
	const BBLAS_Complex64_t **arrayA, const int *lda,
	const double *beta, BBLAS_Complex64_t **arrayC,
	const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
	/* Local variables */
	int first_index = 0;
	int batch_iter;
	int LDA;
	char func_name[15] = "zherk_batch";
	/* Check input arguments */
	if (batch_count < 0)
	{
		xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
	}
	if (batch_opts == BBLAS_FIXED)
	{
		/* Fixed batch: validate the shared (index 0) parameters once; on
		 * failure flag every entry of info and return. */
		if ((uplo[first_index] != BblasUpper) &&
		    (uplo[first_index] != BblasLower))
		{
			xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_ERR_UPLO;
			}
			return;
		}
		if ((trans[first_index] != BblasNoTrans) &&
		    (trans[first_index] != BblasTrans) &&
		    (trans[first_index] != BblasConjTrans))
		{
			xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_ERR_TRANS;
			}
			return;
		}
		if (N[first_index] < 0)
		{
			xerbla_batch(func_name, BBLAS_ERR_N, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_ERR_N;
			}
			return;
		}
		if (K[first_index] < 0)
		{
			xerbla_batch(func_name, BBLAS_ERR_K, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_ERR_K;
			}
			return;
		}
		/* Minimum leading dimension of A depends on whether A is N-by-K
		 * (NoTrans) or K-by-N (ConjTrans). */
		if (trans[first_index] == BblasNoTrans)
		{
			LDA = N[first_index];
		} else
		{
			LDA = K[first_index];
		}
		if (lda[first_index] < max(1, LDA))
		{
			xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				/* Fix: was info[first_index] */
				info[batch_iter] = BBLAS_ERR_LDA;
			}
			return;
		}
		if (ldc[first_index] < max(1, N[first_index]))
		{
			xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_ERR_LDC;
			}
			return;
		}
		/* Quick return: nothing to compute when N == 0, or when the update
		 * term vanishes (K == 0 or alpha == 0) and beta == 1. */
		if (N[first_index] == 0 ||
		    ((K[first_index] == 0 || alpha[first_index] == (double)0.0) &&
		     (beta[first_index] == (double)1.0)))
		{
			for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
			{
				info[batch_iter] = BBLAS_SUCCESS;
			}
			return;
		}
#pragma omp parallel for private(batch_iter)
		for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
		{
			/* Call to cblas_zherk: shared parameters, per-entry matrices */
			cblas_zherk(
				BblasColMajor,
				uplo[first_index],
				trans[first_index],
				N[first_index],
				K[first_index],
				alpha[first_index],
				arrayA[batch_iter],
				lda[first_index],
				beta[first_index],
				arrayC[batch_iter],
				ldc[first_index]);
			/* Successful */
			info[batch_iter] = BBLAS_SUCCESS;
		} /* END FIXED SIZE FOR LOOP */
	} else if (batch_opts == BBLAS_VARIABLE)
	{
		/* Variable batch: every entry is validated and computed
		 * independently; a bad entry only flags its own info slot. */
#pragma omp parallel for private(batch_iter, LDA)
		for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
		{
			/* Check input arguments */
			if ((uplo[batch_iter] != BblasUpper) &&
			    (uplo[batch_iter] != BblasLower))
			{
				xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
				info[batch_iter] = BBLAS_ERR_UPLO;
				continue;
			}
			if ((trans[batch_iter] != BblasNoTrans) &&
			    (trans[batch_iter] != BblasTrans) &&
			    (trans[batch_iter] != BblasConjTrans))
			{
				xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
				info[batch_iter] = BBLAS_ERR_TRANS;
				continue;
			}
			if (N[batch_iter] < 0)
			{
				xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
				info[batch_iter] = BBLAS_ERR_N;
				continue;
			}
			if (K[batch_iter] < 0)
			{
				xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
				info[batch_iter] = BBLAS_ERR_K;
				continue;
			}
			if (trans[batch_iter] == BblasNoTrans)
			{
				LDA = N[batch_iter];
			}
			else
			{
				LDA = K[batch_iter];
			}
			if (lda[batch_iter] < max(1, LDA))
			{
				xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
				info[batch_iter] = BBLAS_ERR_LDA;
				continue;
			}
			if (ldc[batch_iter] < max(1, N[batch_iter]))
			{
				xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
				info[batch_iter] = BBLAS_ERR_LDC;
				continue;
			}
			/* Quick return for this entry (see FIXED case). */
			if (N[batch_iter] == 0 ||
			    ((K[batch_iter] == 0 || alpha[batch_iter] == (double)0.0) &&
			     (beta[batch_iter] == (double)1.0)))
			{
				info[batch_iter] = BBLAS_SUCCESS;
				continue;
			}
			cblas_zherk(
				BblasColMajor,
				uplo[batch_iter],
				trans[batch_iter],
				N[batch_iter],
				K[batch_iter],
				alpha[batch_iter],
				arrayA[batch_iter],
				lda[batch_iter],
				beta[batch_iter],
				arrayC[batch_iter],
				ldc[batch_iter]);
			/* Successful */
			info[batch_iter] = BBLAS_SUCCESS;
		}
	} else
	{
		/* Unknown batch_opts value */
		xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
	}
}
#undef COMPLEX
|
wave.h | /**
Copyright 2013-2014 SYSTAP, LLC. http://www.systap.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This work was (partially) funded by the DARPA XDATA program under
AFRL Contract #FA8750-13-C-0002.
This material is based upon work supported by the Defense Advanced
Research Projects Agency (DARPA) under Contract No. D14PC00029.
*/
#include "mpi.h"
#include "kernel.cuh"
#include <GASengine/statistics.h>
#include <BitmapCompressor/Compressor.cuh>
#include <iostream>
#include <bitset>
#ifndef WAVE_H_
#define WAVE_H_
using namespace std;
using namespace MPI;
using namespace mpikernel;
class wave
//frontier contraction in a 2-d partitioned graph
{
public:
int pi; //row
int pj; //column
int p;
int n;
MPI_Group orig_group, new_row_group, new_col_group;
MPI_Comm new_row_comm, new_col_comm;
int new_row_rank, new_col_rank;
double init_time, propagate_time, broadcast_time, compression_time, copy_time, bitunion_time, decompression_time, propagate_wait, broadcast_wait;
double prop_row, prop_col;
Statistics* stats;
unsigned int *bitmap_compressed;
unsigned char *bitmap_decompressed;
unsigned char *out_copy, *assigned_temp, *prefix_temp;
Compressor* comp;
double compression_ratio_broadcast;
double compression_ratio;
unsigned int compressed_size;
public:
/** Construct the 2-D partition helper: builds the row/column MPI groups and
 *  communicators for this rank, zeroes the timing counters, and allocates the
 *  device-side scratch buffers used by the propagate/broadcast routines.
 *  l_pi: row index; l_pj: column index; l_p: partitions per dimension
 *  (usually sqrt(#processors)); l_n: number of vertices; l_stats: stats sink.
 *  Fix: the original leaked row_indices/col_indices (new[] without delete[]);
 *  MPI_Group_incl copies the rank array, so they are released right after. */
wave(int l_pi, int l_pj, int l_p, int l_n, Statistics* l_stats) :
init_time(0.0), compressed_size(0), propagate_time(0.0), propagate_wait(
0.0), broadcast_time(0.0), broadcast_wait(0.0), compression_time(
0.0), copy_time(0.0), bitunion_time(0.0), prop_row(0.0), prop_col(0.0)
{
double starttime, endtime;
starttime = MPI_Wtime();
pi = l_pi;
pj = l_pj;
p = l_p;
n = l_n;
stats = l_stats;
MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
// Global ranks of this process's row (pi*p .. pi*p+p-1) and column (pj, p+pj, ...).
int *row_indices = new int[p];
int *col_indices = new int[p + 1];
for (int i = 0; i < p; i++)
row_indices[i] = pi * p + i;
for (int i = 0; i < p; i++)
col_indices[i] = i * p + pj;
MPI_Group_incl(orig_group, p, row_indices, &new_row_group);
MPI_Group_incl(orig_group, p, col_indices, &new_col_group);
// MPI_Group_incl copies the rank arrays; free them (fix: previously leaked).
delete[] row_indices;
delete[] col_indices;
MPI_Comm_create(MPI_COMM_WORLD, new_row_group, &new_row_comm);
MPI_Comm_create(MPI_COMM_WORLD, new_col_group, &new_col_comm);
MPI_Group_rank(new_row_group, &new_row_rank);
MPI_Group_rank(new_col_group, &new_col_rank);
endtime = MPI_Wtime();
init_time = endtime - starttime;
propagate_time = 0;
propagate_wait = 0;
broadcast_time = 0;
broadcast_wait = 0;
copy_time = 0;
bitunion_time = 0;
// Device scratch buffers sized for the n-bit frontier bitmap (ceil(n/8) bytes).
util::B40CPerror(
cudaMalloc((void**)&out_copy,
ceil(n / 8.0) * sizeof (unsigned char)));
util::B40CPerror(
cudaMalloc((void**)&assigned_temp,
ceil(n / 8.0) * sizeof (unsigned char)));
util::B40CPerror(
cudaMalloc((void**)&prefix_temp,
ceil(n / 8.0) * sizeof (unsigned char)));
// Compressed buffers use one 32-bit word per 31 payload bits — presumably a
// WAH-style encoding; confirm against the Compressor implementation.
util::B40CPerror(
cudaMalloc((void**)&bitmap_compressed,
(n + 31 - 1) / 31 * sizeof (unsigned int)),
"CsrProblem cudaMalloc bitmap_compressed failed", __FILE__,
__LINE__);
util::B40CPerror(
cudaMemset(bitmap_compressed, 0,
(n + 31 - 1) / 31 * sizeof (unsigned int)),
"Memset bitmap_compressed failed", __FILE__, __LINE__);
util::B40CPerror(
cudaMalloc((void**)&bitmap_decompressed,
(n + 31 - 1) / 31 * sizeof (unsigned int)),
"CsrProblem cudaMalloc bitmap_decompressed failed", __FILE__,
__LINE__);
util::B40CPerror(
cudaMemset(bitmap_decompressed, 0,
(n + 31 - 1) / 31 * sizeof (unsigned int)),
"Memset bitmap_decompressed failed", __FILE__, __LINE__);
comp = new Compressor(n);
}
// void propogate(unsigned char* out_d, unsigned char* assigned_d,
// unsigned char* prefix_d)
// //wave propogation, in sequential from top to bottom of the column
// {
// double starttime, endtime;
//
// unsigned int mesg_size = ceil(n / 8.0);
// int myid = pi * p + pj;
//
// int numthreads = 512;
// int byte_size = (n + 8 - 1) / 8;
// int numblocks = min(512, (byte_size + numthreads - 1) / numthreads);
//
//
// MPI_Request request[2];
// MPI_Status status[2];
// if (p > 1)
// {
//
// if (pj == 0)
// {
// starttime = MPI_Wtime();
// MPI_Isend(out_d, mesg_size, MPI_CHAR, myid + 1, pi,
// MPI_COMM_WORLD, &request[1]);
// MPI_Wait(&request[1], &status[1]);
//
//
// endtime = MPI_Wtime();
// propagate_time = endtime - starttime;
// }
//
// else if (pj != p - 1)
// {
// starttime = MPI_Wtime();
//
// MPI_Irecv(prefix_d, mesg_size, MPI_CHAR, myid - 1, pi, MPI_COMM_WORLD, &request[0]);
// MPI_Wait(&request[0], &status[0]);
// endtime = MPI_Wtime();
// propagate_time = endtime - starttime;
//
// starttime = MPI_Wtime();
// mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d, prefix_d, out_d);
// cudaDeviceSynchronize();
// endtime = MPI_Wtime();
// bitunion_time = endtime - starttime;
//
// starttime = MPI_Wtime();
// MPI_Isend(out_d, mesg_size, MPI_CHAR, myid + 1, pi, MPI_COMM_WORLD, &request[1]);
//
//
// MPI_Wait(&request[1], &status[1]);
//
// endtime = MPI_Wtime();
// propagate_time += endtime - starttime;
// }
//
// else
// {
// starttime = MPI_Wtime();
//
// MPI_Irecv(prefix_d, mesg_size, MPI_CHAR, myid - 1, pi,
// MPI_COMM_WORLD, &request[0]);
// MPI_Wait(&request[0], &status[0]);
//
// endtime = MPI_Wtime();
// propagate_time = endtime - starttime;
// mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d, prefix_d, out_d);
// cudaDeviceSynchronize();
// }
// }
//
// }
// Tree (doubling) propagation along the row: at step d each rank sends its
// current bitmap d ranks ahead and ORs in the bitmap received from d ranks
// behind, doubling d each round (log2(p) rounds instead of the p-1 hops of
// propogate()). out_copy keeps the pre-propagation bitmap so the newly
// assigned bits (out_d minus prior prefix) can be folded into assigned_d.
// NOTE(review): out_d is modified by the bitunion kernel while earlier
// MPI_Isends of out_d may still be pending (Waitall only at the end) —
// modifying a pending send buffer is not permitted by MPI; verify this
// cannot corrupt in-flight messages.
void propogate_tree(unsigned char* out_d, unsigned char* assigned_d,
unsigned char* prefix_d)
{
//printf("tree");
int distance = 1;
bitunion_time = 0.0;
unsigned int mesg_size = ceil(n / 8.0);
int rank_id = pi * p + pj;
// Snapshot the pre-propagation frontier for the assigned-bits computation.
cudaMemcpy(out_copy, out_d, mesg_size, cudaMemcpyDeviceToDevice);
double starttime, endtime, startbitunion, endbitunion;
double waitstart, waitend;
int numthreads = 256;
int numblocks = (mesg_size + numthreads - 1) / numthreads;
MPI_Request request;
MPI_Request requests[10]; // one slot per send round; 10 covers p up to 2^10
MPI_Status status;
MPI_Status statuses[10];
starttime = MPI_Wtime();
// if (rank_id == 0 || rank_id == 1 || rank_id == 2 || rank_id == 3)
// {
// unsigned int *out_h = (unsigned int*)malloc(mesg_size);
// printf("myid=%d, m=%d, initial out_d:\n", rank_id, mesg_size);
// cudaMemcpy(out_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
// for (int i = 0; i < mesg_size; i++)
// {
// bitset < 8 > b(out_h[i]);
// cout << b << endl;
// }
// free(out_h);
// }
// cudaMemcpy(assigned_d, out_d, mesg_size, cudaMemcpyDeviceToDevice);
propagate_wait = 0.0;
int wait_count = 0;
while (distance < p)
{
// Forward the current bitmap 'distance' ranks ahead (nonblocking).
if ((pj + distance) < p)
{
MPI_Isend(out_d, mesg_size, MPI_CHAR, rank_id + distance, pi,
MPI_COMM_WORLD, &requests[wait_count++]);
// waitstart = MPI_Wtime();
// MPI_Wait(&request[wait_count], &status);
// waitend = MPI_Wtime();
// propagate_wait += waitend - waitstart;
}
// Receive from 'distance' ranks behind and OR into both the working
// bitmap (out_d) and the accumulated prefix (prefix_d).
if ((pj - distance) >= 0)
{
MPI_Irecv(prefix_temp, mesg_size, MPI_CHAR, rank_id - distance,
pi, MPI_COMM_WORLD, &request);
waitstart = MPI_Wtime();
MPI_Wait(&request, &status);
waitend = MPI_Wtime();
propagate_wait += waitend - waitstart;
// value += x;
startbitunion = MPI_Wtime();
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d,
prefix_temp, out_d);
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size,
prefix_temp, prefix_d, prefix_d);
cudaDeviceSynchronize();
endbitunion = MPI_Wtime();
bitunion_time += endbitunion - startbitunion;
}
distance *= 2;
// MPI_Barrier(MPI_COMM_WORLD);
// if (rank_id == 3)
// {
// unsigned int *out_h = (unsigned int*)malloc(mesg_size);
// printf("myid=%d, m=%d, distance=%d, out_d:\n", rank_id, mesg_size, distance);
// cudaMemcpy(out_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
// for (int i = 0; i < mesg_size; i++)
// {
// bitset < 8 > b(out_h[i]);
// cout << b << endl;
// }
// free(out_h);
// }
}
// mpikernel::bitsubstract<<<numblocks, numthreads>>>(mesg_size, out_d,
// out_copy, prefix_d);
// Newly assigned bits = (pre-propagation frontier) minus (prefix from
// earlier ranks); accumulate them into assigned_d.
startbitunion = MPI_Wtime();
mpikernel::bitsubstract << <numblocks, numthreads >> >(mesg_size, out_copy,
prefix_d, assigned_temp);
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, assigned_d,
assigned_temp, assigned_d);
endbitunion = MPI_Wtime();
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
bitunion_time += endbitunion - startbitunion;
// Drain all outstanding sends before returning.
MPI_Waitall(wait_count, requests, statuses);
}
// Sequential (chain) propagation: each rank receives the accumulated bitmap
// from the previous rank in the row (tag pi), ORs it into its own frontier
// on the device, and forwards the result to the next rank. assigned_d is
// not used by this version. Blocking MPI_Send/MPI_Recv keep the chain in
// strict order; timings go to propagate_time / bitunion_time.
void propogate(unsigned char* out_d, unsigned char* assigned_d,
unsigned char* prefix_d)
//wave propogation, in sequential from top to bottom of the column
{
double starttime, endtime;
unsigned int mesg_size = ceil(n / 8.0);
int myid = pi * p + pj;
int numthreads = 512;
int byte_size = (n + 8 - 1) / 8;
int numblocks = (byte_size + numthreads - 1) / numthreads;
MPI_Request request[2];
MPI_Status status[2];
// MPI_Barrier(MPI_COMM_WORLD);
if (p > 1)
{
// First rank of the chain: nothing to receive, just send.
if (pj == 0)
{
starttime = MPI_Wtime();
MPI_Send(out_d, mesg_size, MPI_CHAR, myid + 1, pi,
MPI_COMM_WORLD);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
}
// Middle ranks: receive into prefix_d, OR into out_d, forward.
else if (pj != p - 1)
{
starttime = MPI_Wtime();
MPI_Recv(prefix_d, mesg_size, MPI_CHAR, myid - 1, pi,
MPI_COMM_WORLD, &status[1]);
//MPI_Wait(&request[0], &status[0]);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
starttime = MPI_Wtime();
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d,
prefix_d, out_d);
cudaDeviceSynchronize();
endtime = MPI_Wtime();
bitunion_time = endtime - starttime;
starttime = MPI_Wtime();
MPI_Send(out_d, mesg_size, MPI_CHAR, myid + 1, pi,
MPI_COMM_WORLD);
//MPI_Wait(&request[1], &status[1]);
endtime = MPI_Wtime();
propagate_time += endtime - starttime;
}
// Last rank: receive and accumulate only.
else
{
starttime = MPI_Wtime();
MPI_Recv(prefix_d, mesg_size, MPI_CHAR, myid - 1, pi,
MPI_COMM_WORLD, &status[1]);
//MPI_Wait(&request[0], &status[0]);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d,
prefix_d, out_d);
cudaDeviceSynchronize();
}
}
}
// Byte-wise comparison of two host buffers of length mesg_size; prints one
// line reporting whether decompression reproduced the original bitmap.
void correct_test(unsigned char* tmp1_h, unsigned char* tmp2_h,
int mesg_size)
{
const int myid = pi * p + pj;
bool correct = true;
for (int idx = 0; idx < mesg_size; idx++)
{
if (tmp1_h[idx] != tmp2_h[idx])
{
correct = false;
}
}
if (!correct)
printf("myid: %d, Decompression error!!\n", myid);
else
printf("myid: %d, Decompression correct!!\n", myid);
}
// Chain propagation like propogate(), but each hop sends the bitmap in
// compressed form: compress on the device, MPI_Send the compressed words,
// and on receipt MPI_Probe/MPI_Get_count to learn the size, decompress,
// then OR into the local frontier. assigned_d and prefix_d are not used by
// this version. Timings: propagate_time (MPI), compression_time
// (compress+decompress), bitunion_time (OR kernel).
void propogate_compressed(unsigned char* out_d, unsigned char* assigned_d,
unsigned char* prefix_d)
{
double starttime, endtime;
//byte number, NOT int number
unsigned int decompressed_size;
// MPI_Barrier(MPI_COMM_WORLD);
// starttime = MPI_Wtime();
unsigned int mesg_size = ceil(n / 8.0);
int myid = pi * p + pj;
//int lastid = pi*p+p-1;
int numthreads = 512;
int byte_size = (n + 8 - 1) / 8;
int numblocks = min(512, (byte_size + numthreads - 1) / numthreads);
// unsigned char *tmp1_h = (unsigned char*)malloc(mesg_size);
// unsigned char *tmp2_h = (unsigned char*)malloc(mesg_size);
double compress_start;
double compress_end;
compression_time = 0.0;
MPI_Request request[2];
MPI_Status status[2];
int tag = 0;
if (p > 1)
{
//if first one in the column, initiate the wave propogation
if (pj == 0)
{
// cudaMemcpy(tmp1_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
compress_start = MPI_Wtime();
comp->compress(out_d, bitmap_compressed, compressed_size);
compress_end = MPI_Wtime();
compression_time = compress_end - compress_start;
// if (myid == 0)
// {
// unsigned int *out_h = (unsigned int*)malloc(compressed_size / 4 * sizeof (unsigned int));
// printf("myid=%d, m=%d, bitmap_compressed0:\n", myid, compressed_size / 4);
// cudaMemcpy(out_h, bitmap_compressed, compressed_size / 4 * sizeof (unsigned int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < compressed_size / 4; i++)
// {
// bitset < 32 > b(out_h[i]);
// cout << b << endl;
// }
// }
//
// printf("myid=%d, compressed_size=%d, n=%d\n", myid, compressed_size, n);
starttime = MPI_Wtime();
MPI_Send(bitmap_compressed, compressed_size, MPI_BYTE, myid + 1,
tag, MPI_COMM_WORLD);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
// comp->decompress(compressed_size, bitmap_compressed,
// out_d, decompressed_size);
//
// cudaMemcpy(tmp2_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
//
// correct_test(tmp1_h, tmp2_h, mesg_size);
// MPI_Wait(&request[1], &status[1]);
//free(out_h);
}
//else if not the last one, receive bitmap from top, process and send to next one
else if (pj != p - 1)
{
// char *prefix_h = (char*)malloc(mesg_size);
// util::B40CPerror(
// cudaMemset(bitmap_compressed, 0,
// (n + 31 - 1) / 31 * sizeof (unsigned int)),
// "Memset bitmap_compressed failed", __FILE__, __LINE__);
starttime = MPI_Wtime();
int word_size = (n + 30) / 31;
// Probe first: the compressed payload size is only known to the sender.
MPI_Probe(myid - 1, tag, MPI_COMM_WORLD, &status[0]);
MPI_Get_count(&status[0], MPI_BYTE, (int*)&compressed_size);
MPI_Recv(bitmap_compressed, compressed_size, MPI_BYTE, myid - 1,
tag, MPI_COMM_WORLD, &status[0]);
// MPI_Wait(&request[0], &status[0]);
//MPI_Get_count(&status[0], MPI_BYTE, (int*)&compressed_size);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
// printf("myid=%d, compressed_size=%d, n=%d\n", myid, compressed_size, n);
compress_start = MPI_Wtime();
comp->decompress(compressed_size, bitmap_compressed,
bitmap_decompressed, decompressed_size);
cudaDeviceSynchronize();
compress_end = MPI_Wtime();
compression_time = compress_end - compress_start;
// cudaMemcpy(prefix_d, prefix_h, mesg_size, cudaMemcpyHostToDevice);
// mpikernel::bitsubstract << <numblocks, numthreads >> >(mesg_size, out_d, prefix_d, assigned_d);
// cudaDeviceSynchronize();
// OR the upstream frontier into this rank's bitmap.
starttime = MPI_Wtime();
mpikernel::bitunion << <numblocks, numthreads >> >(byte_size, out_d,
bitmap_decompressed, out_d);
cudaDeviceSynchronize();
endtime = MPI_Wtime();
bitunion_time = endtime - starttime;
//cudaMemcpy(out_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
// Re-compress the merged bitmap before forwarding it downstream.
compress_start = MPI_Wtime();
comp->compress(out_d, bitmap_compressed, compressed_size);
cudaDeviceSynchronize();
compress_end = MPI_Wtime();
compression_time += compress_end - compress_start;
// cudaMemcpy(tmp1_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
//
// comp->decompress(compressed_size, bitmap_compressed,
// out_d, decompressed_size);
//
// cudaMemcpy(tmp2_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
//
// correct_test(tmp1_h, tmp2_h, mesg_size);
starttime = MPI_Wtime();
MPI_Send(bitmap_compressed, compressed_size, MPI_BYTE, myid + 1,
tag, MPI_COMM_WORLD);
endtime = MPI_Wtime();
propagate_time += endtime - starttime;
//f//ree(prefix_h);
// MPI_Wait(&request[1], &status[1]);
//free(out_h);
}
//else receive from the previous and then broadcast to the broadcast group
else
{
// char *prefix_h = (char*)malloc(mesg_size);
starttime = MPI_Wtime();
MPI_Probe(myid - 1, tag, MPI_COMM_WORLD, &status[0]);
MPI_Get_count(&status[0], MPI_BYTE, (int*)&compressed_size);
MPI_Recv(bitmap_compressed, compressed_size, MPI_BYTE, myid - 1,
tag, MPI_COMM_WORLD, &status[0]);
//int word_size = (n + 30) / 31;
//MPI_Recv(bitmap_compressed, word_size * sizeof (unsigned int),
// MPI_BYTE, myid - 1, tag, MPI_COMM_WORLD, &status[0]);
//MPI_Get_count(&status[0], MPI_BYTE, (int*)&compressed_size);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
compress_start = MPI_Wtime();
comp->decompress(compressed_size, bitmap_compressed,
bitmap_decompressed, decompressed_size);
cudaDeviceSynchronize();
compress_end = MPI_Wtime();
compression_time = compress_end - compress_start;
// MPI_Wait(&request[0], &status[0]);
//cudaMemcpy(prefix_d, prefix_h, mesg_size, cudaMemcpyHostToDevice);
//mpikernel::bitsubstract << <numblocks, numthreads >> >(mesg_size, out_d, prefix_d, assigned_d);
//cudaDeviceSynchronize();
starttime = MPI_Wtime();
mpikernel::bitunion << <numblocks, numthreads >> >(mesg_size, out_d,
(unsigned char*)bitmap_decompressed, out_d);
cudaDeviceSynchronize();
endtime = MPI_Wtime();
bitunion_time = endtime - starttime;
// Last rank forwards nothing; clear the member-shadowing counter.
compressed_size = 0;
}
}
// endtime = MPI_Wtime();
// propagate_time = endtime - starttime - compression_time;
}
// Broadcast the new frontier in compressed form: the last rank of the row
// (pj == p-1) compresses out_d, broadcasts size + payload along the row
// (root p-1) and decompresses into out_d on every rank; then a second
// size + payload broadcast along the column (root pj, i.e. the diagonal
// rank) is decompressed into in_d. Note compressed_size here is a local
// that shadows the member; on non-root ranks it is filled by the size
// MPI_Bcast before the payload broadcast uses it.
void broadcast_new_frontier_compressed(unsigned char* out_d,
unsigned char* in_d)
{
double starttime, endtime;
unsigned int mesg_size = ceil(n / (8.0));
int myid = pi * p + pj;
// unsigned char *out_h = (unsigned char*)malloc(mesg_size);
// unsigned char *in_h = (unsigned char*)malloc(mesg_size);
unsigned int compressed_size;
unsigned int decompressed_size;
MPI_Barrier(MPI_COMM_WORLD);
starttime = MPI_Wtime();
// Only the row's last rank owns the fully propagated frontier to compress.
if (pj == p - 1)
{
// starttime = MPI_Wtime();
comp->compress(out_d, bitmap_compressed, compressed_size);
// endtime = MPI_Wtime();
// compression_time = endtime - starttime;
// comp->decompress(compressed_size, bitmap_compressed, bitmap_decompressed, decompressed_size);
// cudaMemcpy(out_h, out_d, mesg_size, cudaMemcpyDeviceToHost);
// cudaMemcpy(in_h, bitmap_decompressed, mesg_size, cudaMemcpyDeviceToHost);
// if (pi == 0 && pj == p-1)
// {
// bool correct = true;
// for (int i = 0; i < mesg_size; i++)
// {
// bitset < 8 > b1(out_h[i]);
// bitset < 8 > b2(in_h[i]);
//// cout << b1 << " " << b2 << endl;
// if (out_h[i] != in_h[i])
// correct = false;
// }
// if(correct == false) printf("myid: %d, Decompression error!!\n", myid);
// else printf("myid: %d, Decompression correct!!\n", myid);
// }
// cudaMemcpy(in_h, bitmap_decompressed, mesg_size, cudaMemcpyDeviceToHost);
// if (pi == 0 && pj == p-1)
// {
// bool correct = true;
// for (int i = 0; i < mesg_size; i++)
// {
// bitset < 8 > b1(out_h[i]);
// bitset < 8 > b2(in_h[i]);
//// cout << b1 << " " << b2 << endl;
// if (out_h[i] != in_h[i])
// correct = false;
// }
// if(correct == false) printf("myid: %d, Decompression error!!\n", myid);
// else printf("myid: %d, Decompression correct!!\n", myid);
// }
}
// Row broadcast: size first (payload length varies), then the payload.
MPI_Bcast(&compressed_size, 1, MPI_UNSIGNED, p - 1, new_row_comm);
MPI_Bcast(bitmap_compressed, compressed_size, MPI_BYTE, p - 1,
new_row_comm);
comp->decompress(compressed_size, bitmap_compressed, out_d,
decompressed_size);
// MPI_Bcast(bitmap_compressed, 8, MPI_BYTE, p - 1, new_row_comm);
// cudaMemcpy(out_d, out_h, mesg_size, cudaMemcpyHostToDevice);
//using bitmap_decompressed as temp buffer
// if (pi == pj)
// cudaMemcpy(bitmap_decompressed, bitmap_compressed, compressed_size, cudaMemcpyDeviceToDevice);
// unsigned int compressed_size2 = compressed_size;
// Column broadcast from the diagonal rank (row rank pj in new_col_comm).
MPI_Bcast(&compressed_size, 1, MPI_UNSIGNED, pj, new_col_comm);
MPI_Bcast(bitmap_compressed, compressed_size, MPI_BYTE, pj,
new_col_comm);
// MPI_Bcast(bitmap_compressed, 8, MPI_BYTE, pj, new_col_comm);
comp->decompress(compressed_size, bitmap_compressed, in_d,
decompressed_size);
// cudaMemcpy(out_d, out_h, mesg_size, cudaMemcpyHostToDevice);
// cudaMemcpy(in_d, in_h, mesg_size, cudaMemcpyHostToDevice);
// cudaDeviceSynchronize();
// free(in_h);
// free(out_h);
endtime = MPI_Wtime();
broadcast_time = endtime - starttime;
compression_ratio_broadcast = (double)compressed_size / mesg_size;
// if (pj==p-1)
// printf("myid: %d compressed_size: %d original_size: %d compression_ratio_broadcast: %lf\n",
// myid, compressed_size, mesg_size, compression_ratio_broadcast);
// printf("myid: %d broadcast_time: %lf\n", myid, broadcast_time);
}
//Version that does not support GPUDirect
// Row-wise OR-reduction of the frontier bitmap staged through host memory
// (for MPI stacks without GPUDirect), followed by a column broadcast of the
// reduced frontier from the diagonal rank. The compress/decompress round
// trip is timed only to report compression_ratio; its output is not used
// for the reduction itself.
// Fix: out_h2 was malloc'd but never freed (leak per call).
void reduce_frontier_CPU(unsigned char* out_d, unsigned char* in_d)
{
double starttime, endtime;
unsigned int mesg_size = ceil(n / (8.0));
unsigned int word_size = (n + 30) / 31;
unsigned char *out_h = (unsigned char*)malloc(
word_size * sizeof (unsigned int));
unsigned char *out_h2 = (unsigned char*)malloc(mesg_size);
unsigned char *in_h = (unsigned char*)malloc(mesg_size);
// Stage the device bitmap into host memory for the MPI reduction.
cudaMemcpy(out_h, out_d, word_size * sizeof (unsigned int),
cudaMemcpyDeviceToHost);
unsigned int compressed_size; //number of bytes
unsigned int decompressed_size; //number of bytes
// Timed compression/decompression round trip (ratio bookkeeping only).
starttime = MPI_Wtime();
comp->compress(out_d, bitmap_compressed, compressed_size);
endtime = MPI_Wtime();
compression_time = endtime - starttime;
starttime = MPI_Wtime();
comp->decompress(compressed_size, bitmap_compressed,
bitmap_decompressed, decompressed_size);
endtime = MPI_Wtime();
decompression_time = endtime - starttime;
// OR together the row's frontiers on the host, then push the result back.
starttime = MPI_Wtime();
MPI_Allreduce(out_h, out_h2, mesg_size, MPI_BYTE, MPI_BOR,
new_row_comm);
cudaMemcpy(out_d, out_h2, mesg_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
compression_ratio = (double)compressed_size / decompressed_size;
// Diagonal rank seeds the column broadcast with the reduced frontier.
starttime = MPI_Wtime();
if (pi == pj)
memcpy(in_h, out_h2, mesg_size);
MPI_Bcast(in_h, mesg_size, MPI_CHAR, pj, new_col_comm);
endtime = MPI_Wtime();
broadcast_time = endtime - starttime;
cudaMemcpy(in_d, in_h, mesg_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
free(in_h);
free(out_h);
free(out_h2); // fix: previously leaked
}
//version that supports GPUDirect
// GPUDirect variant of the frontier reduction: MPI operates directly on the
// device buffers. Row-wise OR-reduce into out_d, then column broadcast into
// in_d from the diagonal rank.
// Fix: the original called MPI_Allreduce(out_d, out_d, ...) — the MPI
// standard forbids aliased send/receive buffers; MPI_IN_PLACE is the
// sanctioned way to reduce into the caller's buffer.
void reduce_frontier_GDR(unsigned char* out_d, unsigned char* in_d)
{
unsigned int mesg_size = ceil(n / (8.0));
double starttime, endtime;
starttime = MPI_Wtime();
MPI_Allreduce(MPI_IN_PLACE, out_d, mesg_size, MPI_BYTE, MPI_BOR, new_row_comm);
endtime = MPI_Wtime();
propagate_time = endtime - starttime;
// Diagonal rank owns the column-broadcast root's copy of the frontier.
if (pi == pj)
cudaMemcpy(in_d, out_d, mesg_size, cudaMemcpyDeviceToDevice);
starttime = MPI_Wtime();
MPI_Bcast(in_d, mesg_size, MPI_CHAR, pj, new_col_comm);
endtime = MPI_Wtime();
broadcast_time = endtime - starttime;
}
// Non-blocking variant of broadcast_new_frontier: the frontier produced by
// the last column (root p-1 on new_row_comm) is broadcast along each row;
// the diagonal rank (pi == pj) copies it device-to-device into in_d, and
// in_d is then broadcast down each column (root pj on new_col_comm).
// Wait time accumulates into broadcast_wait, the device copy into
// copy_time, the overall call into broadcast_time.
void broadcast_new_frontier_nonblocking(unsigned char* out_d,
        unsigned char* in_d)
{
    //printf("In Broadcast");
    double starttime, endtime, waitstart, waitend;
    MPI_Request request[2];
    MPI_Status status[2];
    broadcast_time = 0.0;
    broadcast_wait = 0.0;
    // MPI_Barrier(MPI_COMM_WORLD);
    unsigned int mesg_size = ceil(n / (8.0));  // frontier bitmap bytes
    starttime = MPI_Wtime();
    MPI_Ibcast(out_d, mesg_size, MPI_CHAR, p - 1, new_row_comm,
            &request[0]);
    if (pi == pj)
    {
        // Diagonal rank must hold the row result before seeding the
        // column broadcast, so it waits on the row bcast first.
        waitstart = MPI_Wtime();
        MPI_Wait(&request[0], &status[0]);
        waitend = MPI_Wtime();
        broadcast_wait += waitend - waitstart;
        // NOTE(review): starttime is overwritten here, so on diagonal
        // ranks the final broadcast_time excludes the row bcast phase --
        // confirm that this is the intended measurement.
        starttime = MPI_Wtime();
        cudaMemcpy(in_d, out_d, mesg_size, cudaMemcpyDeviceToDevice);
        endtime = MPI_Wtime();
        copy_time = endtime - starttime;
        MPI_Ibcast(in_d, mesg_size, MPI_CHAR, pj, new_col_comm,
                &request[1]);
        waitstart = MPI_Wtime();
        MPI_Wait(&request[1], &status[1]);
        waitend = MPI_Wtime();
        broadcast_wait += waitend - waitstart;
    }
    else
    {
        // Off-diagonal ranks can post the column bcast immediately; it is
        // rooted at the diagonal rank pj on new_col_comm.
        MPI_Ibcast(in_d, mesg_size, MPI_CHAR, pj, new_col_comm,
                &request[1]);
        waitstart = MPI_Wtime();
        MPI_Wait(&request[0], &status[0]);
        MPI_Wait(&request[1], &status[1]);
        waitend = MPI_Wtime();
        broadcast_wait += waitend - waitstart;
    }
    endtime = MPI_Wtime();
    broadcast_time += endtime - starttime;
}
// Blocking broadcast of the new frontier bitmap: broadcast out_d along the
// row (root p-1), let the diagonal rank copy it device-to-device into
// in_d, then broadcast in_d down the column (root pj).  Records
// broadcast_time (sum of both bcasts) and copy_time.
void broadcast_new_frontier(unsigned char* out_d, unsigned char* in_d)
{
    const unsigned int nbytes = ceil(n / (8.0));
    double t0 = MPI_Wtime();
    MPI_Bcast(out_d, nbytes, MPI_CHAR, p - 1, new_row_comm);
    broadcast_time = MPI_Wtime() - t0;
    t0 = MPI_Wtime();
    if (pi == pj)
        cudaMemcpy(in_d, out_d, nbytes, cudaMemcpyDeviceToDevice);
    copy_time = MPI_Wtime() - t0;
    t0 = MPI_Wtime();
    MPI_Bcast(in_d, nbytes, MPI_CHAR, pj, new_col_comm);
    broadcast_time += MPI_Wtime() - t0;
}
// Hand-rolled tree broadcast of the frontier bitmap using point-to-point
// messages over MPI_COMM_WORLD, with ranks addressed linearly as
// pi * p + pj (p x p process grid).
// Phase 1: binomial tree within each row -- data flows from the last
//          column toward lower columns, halving the segment each step.
// Phase 2: the last column hands the row result to row 0 and row p/2.
// Phase 3: binomial tree down each column.
// Records prop_row / prop_col per phase and broadcast_time overall.
// iter is only consumed by the commented-out tracing code.
void broadcast_tree(unsigned char* out_d, unsigned char* in_d, int iter)
{
    double starttime, endtime;
    double prop_start, prop_end;
    int rank_id = pi * p + pj;  // linear rank (used by tracing below)
    MPI_Request request[7];
    MPI_Status status[3];
    broadcast_wait = 0.0;
    broadcast_time = 0.0;
    // MPI_Barrier(MPI_COMM_WORLD);
    unsigned int mesg_size = ceil(n / (8.0));  // frontier bitmap bytes
    // cudaMemcpy(out_copy, out_d, mesg_size, cudaMemcpyDeviceToDevice);
    // unsigned char* out = (unsigned char*)malloc(mesg_size);
    // unsigned char* in = (unsigned char*)malloc(mesg_size);
    //
    // cudaMemcpy(out, out_d, mesg_size, cudaMemcpyDeviceToHost);
    // if (pi == 0 && pj == 0)
    // printf("mesg_size=%d\n", mesg_size);
    // unsigned int in[2] = {0, 1};
    // unsigned int out[2] = {0, 1};
    starttime = MPI_Wtime();
    prop_start = MPI_Wtime();
    // Phase 1: within a row, the rank at the right edge of each segment
    // sends out_d half a segment to the left; the rank at the segment
    // midpoint receives and overwrites its own out_d.  (Note: recv_rank
    // here is the *destination* of the send, send_rank the *source* of
    // the receive.)
    int seg_size = p;
    while ((seg_size >> 1) > 0)
    {
        if ((pj + 1) % seg_size == 0)
        {
            int recv_rank = p * pi + pj - (seg_size >> 1);
            MPI_Isend(out_d, mesg_size, MPI_CHAR, recv_rank, 0,
                    MPI_COMM_WORLD, &request[0]);
            MPI_Wait(&request[0], &status[0]);
            // MPI_Isend(out, mesg_size, MPI_CHAR, recv_rank, 0,
            // MPI_COMM_WORLD, &request[0]);
            // cudaMemcpy(out, out_d, 2 * sizeof (int), cudaMemcpyDeviceToHost);
            // printf("Send: iter=%d, rank_id=%d, to=%d, out[0]=%u, out[1]=%u\n", iter, pi * p + pj, recv_rank, out[0], out[1]);
        }
        if ((pj + 1 + (seg_size >> 1)) % seg_size == 0)
        {
            int send_rank = p * pi + pj + (seg_size >> 1);
            MPI_Irecv(out_d, mesg_size, MPI_CHAR, send_rank, 0, MPI_COMM_WORLD, &request[1]);
            // MPI_Irecv(out, mesg_size, MPI_CHAR, send_rank, 0, MPI_COMM_WORLD, &request[1]);
            MPI_Wait(&request[1], &status[0]);
            // cudaMemcpy(in, out_d, 2 * sizeof (int), cudaMemcpyDeviceToHost);
            // printf("Recv: iter=%d, rank_id=%d, from=%d, out[0]=%u, out[1]=%u\n", iter, pi * p + pj, send_rank, in[0], in[1]);
        }
        seg_size >>= 1;
        // fflush(stdout);
        // usleep(1000);
        // MPI_Barrier(MPI_COMM_WORLD);
    }
    // MPI_Barrier(MPI_COMM_WORLD);
    // cudaMemcpy(out_d, out_copy, msg_size, cudaMemcpyDeviceToDevice);
    prop_end = MPI_Wtime();
    prop_row = prop_end - prop_start;
    // cudaDeviceSynchronize();
    // MPI_Barrier(MPI_COMM_WORLD);
    prop_start = MPI_Wtime();
    // Phase 2: each rank in the last column (tag 1) sends its out_d to the
    // matching ranks in row 0 and row p/2, which receive into in_d.
    if (pj == p - 1)
    {
        int recv_rank = pi;
        MPI_Isend(out_d, mesg_size, MPI_CHAR, recv_rank, 1,
                MPI_COMM_WORLD, &request[2]);
        MPI_Wait(&request[2], &status[0]);
        // MPI_Isend(out, mesg_size, MPI_CHAR, recv_rank, 1,
        // MPI_COMM_WORLD, &request[2]);
        // printf("pi=%d, pj=%d, to=%d\n", pi, pj, recv_rank);
        recv_rank = (p >> 1) * p + pi;
        // NOTE(review): this second send is never waited on (the matching
        // MPI_Wait is commented out) -- confirm request[3] completion is
        // guaranteed elsewhere before out_d is reused.
        MPI_Isend(out_d, mesg_size, MPI_CHAR, recv_rank, 1,
                MPI_COMM_WORLD, &request[3]);
        // MPI_Wait(&request[3], &status[0]);
        // MPI_Isend(out, mesg_size, MPI_CHAR, recv_rank, 1,
        // MPI_COMM_WORLD, &request[3]);
        // printf("pi=%d, pj=%d, to=%d\n", pi, pj, recv_rank);
    }
    if (pi == 0 || pi == (p >> 1))
    {
        int send_rank = pj * p + p - 1;
        MPI_Irecv(in_d, mesg_size, MPI_CHAR, send_rank,
                1, MPI_COMM_WORLD, &request[4]);
        // MPI_Irecv(in, mesg_size, MPI_CHAR, send_rank,
        // 1, MPI_COMM_WORLD, &request[4]);
        // printf("pi=%d, pj=%d, from=%d\n", pi, pj, send_rank);
        MPI_Wait(&request[4], &status[1]);
    }
    // MPI_Barrier(MPI_COMM_WORLD);
    // Phase 3: binomial tree down each column, starting from rows 0 and
    // p/2; segment-start rows send in_d half a segment downward.
    seg_size = p >> 1;
    while ((seg_size >> 1) > 0)
    {
        if (pi % seg_size == 0)
        {
            int recv_rank = p * (pi + (seg_size >> 1)) + pj;
            MPI_Isend(in_d, mesg_size, MPI_CHAR, recv_rank, 1,
                    MPI_COMM_WORLD, &request[5]);
            MPI_Wait(&request[5], &status[0]);
            // MPI_Isend(in, mesg_size, MPI_CHAR, recv_rank, 1,
            // MPI_COMM_WORLD, &request[5]);
            // printf("pi=%d, pj=%d, to=%d\n", pi, pj, recv_rank);
        }
        if ((pi - (seg_size >> 1)) % seg_size == 0)
        {
            int send_rank = (pi - (seg_size >> 1)) * p + pj;
            MPI_Irecv(in_d, mesg_size, MPI_CHAR, send_rank,
                    1, MPI_COMM_WORLD, &request[6]);
            // MPI_Irecv(in, mesg_size, MPI_CHAR, send_rank,
            // 1, MPI_COMM_WORLD, &request[6]);
            // printf("pi=%d, pj=%d, from=%d\n", pi, pj, send_rank);
            MPI_Wait(&request[6], &status[2]);
        }
        seg_size >>= 1;
        // fflush(stdout);
        // usleep(1000);
        // MPI_Barrier(MPI_COMM_WORLD);
    }
    // cudaMemcpy(out_d, out, mesg_size, cudaMemcpyHostToDevice);
    // cudaMemcpy(in_d, in, mesg_size, cudaMemcpyHostToDevice);
    // MPI_Barrier(MPI_COMM_WORLD);
    // cudaMemcpy(out_d, out_copy, mesg_size, cudaMemcpyDeviceToDevice);
    prop_end = MPI_Wtime();
    endtime = MPI_Wtime();
    broadcast_time = endtime - starttime;
    prop_col = prop_end - prop_start;
}
};
#endif /* WAVE_H_ */
|
spmm.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <algorithm>
#include <limits>
#include <memory>
#include "spmm_binary_ops.h"
#if !defined(_WIN32)
#ifdef USE_AVX
#include "intel/cpu_support.h"
#ifdef USE_LIBXSMM
#include "spmm_blocking_libxsmm.h"
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
namespace dgl {
namespace aten {
namespace cpu {
#if !defined(_WIN32)
#ifdef USE_AVX
/*!
 * \brief CPU kernel of SpMM on Csr format using Xbyak.
 * \param cpu_spec JIT'ed element-wise add-update kernel.
 * \param bcast Broadcast information.
 * \param csr The Csr matrix.
 * \param X The feature on source nodes.
 * \param W The feature on edges.
 * \param O The result feature on destination nodes.
 * \note Node-parallel: each OpenMP thread owns whole output rows, so no
 *       synchronization is needed; per-edge work is done by the JIT'ed
 *       kernel.
 */
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrXbyak(dgl::ElemWiseAddUpdate<Op>* cpu_spec, const BcastOff& bcast,
                     const CSRMatrix& csr, const DType* X, const DType* W, DType* O) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  const IdType* indices = csr.indices.Ptr<IdType>();
  const IdType* edges = csr.data.Ptr<IdType>();
  const int64_t out_len = bcast.out_len;
  const int64_t ufeat_len = bcast.lhs_len;
  const int64_t efeat_len = bcast.rhs_len;
#pragma omp parallel for
  for (IdType dst = 0; dst < csr.num_rows; ++dst) {
    DType* out_row = O + dst * out_len;
    for (IdType pos = indptr[dst]; pos < indptr[dst + 1]; ++pos) {
      const IdType src = indices[pos];
      const IdType eid = has_idx ? edges[pos] : pos;
      cpu_spec->run(out_row, X + src * ufeat_len, W + eid * efeat_len, out_len);
    }
  }
}
#endif // USE_AVX
#endif // _WIN32
/*!
* \brief Naive CPU kernel of SpMM on Csr format.
* \param cpu_spec JIT'ed kernel
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param X The feature on source nodes.
* \param W The feature on edges.
* \param O The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrNaive(const BcastOff& bcast, const CSRMatrix& csr, const DType* X,
const DType* W, DType* O) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
out_off[k] += Op::Call(lhs_off, rhs_off);
}
}
}
}
/*!
 * \brief CPU kernel of SpMM on Csr format: dispatches to the LIBXSMM
 *        blocked kernel, the Xbyak JIT kernel, or the naive loop.
 * \param bcast Broadcast information.
 * \param csr The Csr matrix.
 * \param ufeat The feature on source nodes.
 * \param efeat The feature on edges.
 * \param out The result feature on destination nodes.
 * \note it uses node parallel strategy, different threads are responsible
 *       for the computation of different nodes.
 */
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
                NDArray efeat, NDArray out) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  const IdType* indices = csr.indices.Ptr<IdType>();
  const IdType* edges = csr.data.Ptr<IdType>();
  const DType* X = ufeat.Ptr<DType>();
  const DType* W = efeat.Ptr<DType>();
  int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
  DType* O = out.Ptr<DType>();
  // Validate only the pointers the selected operator actually reads.
  CHECK_NOTNULL(indptr);
  CHECK_NOTNULL(O);
  if (Op::use_lhs) {
    CHECK_NOTNULL(indices);
    CHECK_NOTNULL(X);
  }
  if (Op::use_rhs) {
    if (has_idx)
      CHECK_NOTNULL(edges);
    CHECK_NOTNULL(W);
  }
  // Careful: the braces opened in the #ifdef regions below are closed in
  // the matching #ifdef regions after the naive call, so every
  // preprocessor configuration sees balanced braces.
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
  // LIBXSMM does not handle broadcasting or double precision.
  const bool no_libxsmm =
      bcast.use_bcast || std::is_same<DType, double>::value;
  if (!no_libxsmm) {
    SpMMSumCsrLibxsmm<IdType, DType, Op>(bcast, csr, ufeat, efeat, out);
  } else {
#endif  // USE_LIBXSMM
  typedef dgl::ElemWiseAddUpdate<Op> ElemWiseUpd;
  /* Prepare an assembler kernel */
  static std::unique_ptr<ElemWiseUpd> asm_kernel_ptr(
      (dgl::IntelKernel<>::IsEnabled()) ? new ElemWiseUpd() : nullptr);
  /* Distribute the kernel among OMP threads */
  ElemWiseUpd* cpu_spec = (asm_kernel_ptr && asm_kernel_ptr->applicable())
                              ? asm_kernel_ptr.get()
                              : nullptr;
  // The JIT kernel only pays off for wide, non-broadcast features.
  if (cpu_spec && dim > 16 && !bcast.use_bcast) {
    SpMMSumCsrXbyak<IdType, DType, Op>(cpu_spec, bcast, csr, X, W, O);
  } else {
#endif  // USE_AVX
#endif  // _WIN32
    SpMMSumCsrNaive<IdType, DType, Op>(bcast, csr, X, W, O);
#if !defined(_WIN32)
#ifdef USE_AVX
  }
#ifdef USE_LIBXSMM
  }
#endif  // USE_LIBXSMM
#endif  // USE_AVX
#endif  // _WIN32
}
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. To avoid possible data hazard,
* we use atomic operators in the reduction phase.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat,
NDArray efeat, NDArray out) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx ? edges[i] : i;
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (val != 0) {
#pragma omp atomic
out_off[k] += val;
}
}
}
}
/*!
 * \brief CPU kernel of SpMM-Min/Max on Csr format.
 * \param bcast Broadcast information.
 * \param csr The Csr matrix.
 * \param ufeat The feature on source nodes.
 * \param efeat The feature on edges.
 * \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes: the source node indices that
 *        produced the minimum/maximum of the reduction on each
 *        destination node. Used when computing gradients of Min/Max.
 * \param arge Arg-Min/Max on edges: the edge indices that produced the
 *        minimum/maximum of the reduction on each destination node.
 *        Used when computing gradients of Min/Max.
 * \note It uses node parallel strategy, different threads are responsible
 *       for the computation of different nodes.
 * \note The result will contain infinity for zero-degree nodes.
 */
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
                NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* indices = static_cast<IdType*>(csr.indices->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
  CHECK_NOTNULL(indptr);
  CHECK_NOTNULL(O);
  if (Op::use_lhs) {
    CHECK_NOTNULL(indices);
    CHECK_NOTNULL(X);
    CHECK_NOTNULL(argX);
  }
  if (Op::use_rhs) {
    if (has_idx)
      CHECK_NOTNULL(edges);
    CHECK_NOTNULL(W);
    CHECK_NOTNULL(argW);
  }
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
  // LIBXSMM does not handle broadcasting or double precision.
  const bool no_libxsmm =
      bcast.use_bcast || std::is_same<DType, double>::value;
  if (!no_libxsmm) {
    SpMMCmpCsrLibxsmm<IdType, DType, Op, Cmp>(bcast, csr, ufeat, efeat, out, argu, arge);
  } else {
#endif  // USE_LIBXSMM
#endif  // USE_AVX
#endif  // _WIN32
#pragma omp parallel for
  for (IdType rid = 0; rid < csr.num_rows; ++rid) {
    const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
    DType* out_off = O + rid * dim;
    // Fix: only form the arg pointers when the corresponding operand is
    // in use -- argX/argW are nullptr otherwise, and pointer arithmetic
    // on nullptr is undefined behavior (matches SpMMCmpCoo).
    IdType* argx_off = Op::use_lhs ? argX + rid * dim : nullptr;
    IdType* argw_off = Op::use_rhs ? argW + rid * dim : nullptr;
    for (IdType j = row_start; j < row_end; ++j) {
      const IdType cid = indices[j];
      const IdType eid = has_idx ? edges[j] : j;
      for (int64_t k = 0; k < dim; ++k) {
        const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
        const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
        const DType* lhs_off =
            Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
        const DType* rhs_off =
            Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
        const DType val = Op::Call(lhs_off, rhs_off);
        // Keep the winning value and remember which src/edge produced it.
        if (Cmp::Call(out_off[k], val)) {
          out_off[k] = val;
          if (Op::use_lhs) argx_off[k] = cid;
          if (Op::use_rhs) argw_off[k] = eid;
        }
      }
    }
  }
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
  }
#endif  // USE_LIBXSMM
#endif  // USE_AVX
#endif  // _WIN32
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer. \param arge Arg-Min/Max on edges. which refers the source node
* indices correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer. \note it uses node parallel strategy, different threads are
* responsible for the computation of different nodes. To avoid possible data
* hazard, we use atomic operators in the reduction phase. \note The result will
* contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat,
NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = static_cast<IdType*>(coo.row->data);
const IdType* col = static_cast<IdType*>(coo.col->data);
const IdType* edges =
has_idx ? static_cast<IdType*>(coo.data->data) : nullptr;
const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
const int64_t nnz = coo.row->shape[0];
// fill zero elements
std::fill(O, O + out.NumElements(), Cmp::zero);
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx ? edges[i] : i;
DType* out_off = O + cid * dim;
IdType* argx_off = Op::use_lhs ? argX + cid * dim : nullptr;
IdType* argw_off = Op::use_rhs ? argW + cid * dim : nullptr;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs) argx_off[k] = rid;
if (Op::use_rhs) argw_off[k] = eid;
}
}
}
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
convolutiondepthwise_3x3_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
const __fp16* k0 = kernel.row<const __fp16>(g);
__fp16* outptr0 = out.row<__fp16>(0);
__fp16* outptr1 = out.row<__fp16>(1);
const Mat img0 = bottom_blob.channel(g);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
float16x8_t _k00 = vld1q_f16(k0);
float16x8_t _k01 = vld1q_f16(k0 + 8);
float16x8_t _k02 = vld1q_f16(k0 + 16);
float16x8_t _k10 = vld1q_f16(k0 + 24);
float16x8_t _k11 = vld1q_f16(k0 + 32);
float16x8_t _k12 = vld1q_f16(k0 + 40);
float16x8_t _k20 = vld1q_f16(k0 + 48);
float16x8_t _k21 = vld1q_f16(k0 + 56);
float16x8_t _k22 = vld1q_f16(k0 + 64);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r10 r11 r12 r13
"mov v24.16b, %21.16b \n" // sum00
"mov v25.16b, %21.16b \n" // sum01
"mov v26.16b, %21.16b \n" // sum02
"mov v27.16b, %21.16b \n" // sum03
"mov v28.16b, %21.16b \n" // sum10
"mov v29.16b, %21.16b \n" // sum11
"mov v30.16b, %21.16b \n" // sum12
"mov v31.16b, %21.16b \n" // sum13
"fmla v24.8h, %15.8h, v12.8h \n"
"fmla v25.8h, %15.8h, v13.8h \n"
"fmla v26.8h, %15.8h, v14.8h \n"
"fmla v27.8h, %15.8h, v15.8h \n"
"fmla v28.8h, %12.8h, v12.8h \n"
"fmla v29.8h, %12.8h, v13.8h \n"
"fmla v30.8h, %12.8h, v14.8h \n"
"fmla v31.8h, %12.8h, v15.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.8h, v17.8h}, [%3] \n" // r14 r15
"fmla v24.8h, %16.8h, v13.8h \n"
"fmla v25.8h, %16.8h, v14.8h \n"
"fmla v26.8h, %16.8h, v15.8h \n"
"fmla v27.8h, %16.8h, v16.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v29.8h, %13.8h, v14.8h \n"
"fmla v30.8h, %13.8h, v15.8h \n"
"fmla v31.8h, %13.8h, v16.8h \n"
"fmla v24.8h, %17.8h, v14.8h \n"
"fmla v25.8h, %17.8h, v15.8h \n"
"fmla v26.8h, %17.8h, v16.8h \n"
"fmla v27.8h, %17.8h, v17.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%4], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, %14.8h, v14.8h \n"
"fmla v29.8h, %14.8h, v15.8h \n"
"fmla v30.8h, %14.8h, v16.8h \n"
"fmla v31.8h, %14.8h, v17.8h \n"
"fmla v24.8h, %18.8h, v18.8h \n"
"fmla v25.8h, %18.8h, v19.8h \n"
"fmla v26.8h, %18.8h, v20.8h \n"
"fmla v27.8h, %18.8h, v21.8h \n"
"fmla v28.8h, %15.8h, v18.8h \n"
"fmla v29.8h, %15.8h, v19.8h \n"
"fmla v30.8h, %15.8h, v20.8h \n"
"fmla v31.8h, %15.8h, v21.8h \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v22.8h, v23.8h}, [%4] \n" // r24 r25
"fmla v24.8h, %19.8h, v19.8h \n"
"fmla v25.8h, %19.8h, v20.8h \n"
"fmla v26.8h, %19.8h, v21.8h \n"
"fmla v27.8h, %19.8h, v22.8h \n"
"fmla v28.8h, %16.8h, v19.8h \n"
"fmla v29.8h, %16.8h, v20.8h \n"
"fmla v30.8h, %16.8h, v21.8h \n"
"fmla v31.8h, %16.8h, v22.8h \n"
"fmla v24.8h, %20.8h, v20.8h \n"
"fmla v25.8h, %20.8h, v21.8h \n"
"fmla v26.8h, %20.8h, v22.8h \n"
"fmla v27.8h, %20.8h, v23.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r00 r01 r02 r03
"fmla v28.8h, %17.8h, v20.8h \n"
"fmla v29.8h, %17.8h, v21.8h \n"
"fmla v30.8h, %17.8h, v22.8h \n"
"fmla v31.8h, %17.8h, v23.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%5], #64 \n" // r30 r31 r32 r33
"fmla v24.8h, %12.8h, v12.8h \n"
"fmla v25.8h, %12.8h, v13.8h \n"
"fmla v26.8h, %12.8h, v14.8h \n"
"fmla v27.8h, %12.8h, v15.8h \n"
"fmla v28.8h, %18.8h, v18.8h \n"
"fmla v29.8h, %18.8h, v19.8h \n"
"fmla v30.8h, %18.8h, v20.8h \n"
"fmla v31.8h, %18.8h, v21.8h \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.8h, v17.8h}, [%2] \n" // r04 r05
"fmla v24.8h, %13.8h, v13.8h \n"
"fmla v25.8h, %13.8h, v14.8h \n"
"fmla v26.8h, %13.8h, v15.8h \n"
"fmla v27.8h, %13.8h, v16.8h \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v22.8h, v23.8h}, [%5] \n" // r34 r35
"fmla v28.8h, %19.8h, v19.8h \n"
"fmla v29.8h, %19.8h, v20.8h \n"
"fmla v30.8h, %19.8h, v21.8h \n"
"fmla v31.8h, %19.8h, v22.8h \n"
"fmla v24.8h, %14.8h, v14.8h \n"
"fmla v25.8h, %14.8h, v15.8h \n"
"fmla v26.8h, %14.8h, v16.8h \n"
"fmla v27.8h, %14.8h, v17.8h \n"
"fmla v28.8h, %20.8h, v20.8h \n"
"fmla v29.8h, %20.8h, v21.8h \n"
"fmla v30.8h, %20.8h, v22.8h \n"
"fmla v31.8h, %20.8h, v23.8h \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3] \n" // r10 r11 r12 r13
"mov v28.16b, %21.16b \n" // sum00
"mov v29.16b, %21.16b \n" // sum01
"mov v30.16b, %21.16b \n" // sum10
"mov v31.16b, %21.16b \n" // sum11
"fmla v28.8h, %15.8h, v16.8h \n"
"fmla v30.8h, %12.8h, v16.8h \n"
"fmla v29.8h, %15.8h, v17.8h \n"
"fmla v31.8h, %12.8h, v17.8h \n"
"fmla v28.8h, %16.8h, v17.8h \n"
"fmla v30.8h, %13.8h, v17.8h \n"
"fmla v29.8h, %16.8h, v18.8h \n"
"fmla v31.8h, %13.8h, v18.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" // r20 r21 r22 r23
"fmla v28.8h, %17.8h, v18.8h \n"
"fmla v30.8h, %14.8h, v18.8h \n"
"fmla v29.8h, %17.8h, v19.8h \n"
"fmla v31.8h, %14.8h, v19.8h \n"
"fmla v28.8h, %18.8h, v20.8h \n"
"fmla v30.8h, %15.8h, v20.8h \n"
"fmla v29.8h, %18.8h, v21.8h \n"
"fmla v31.8h, %15.8h, v21.8h \n"
"fmla v28.8h, %19.8h, v21.8h \n"
"fmla v30.8h, %16.8h, v21.8h \n"
"fmla v29.8h, %19.8h, v22.8h \n"
"fmla v31.8h, %16.8h, v22.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2] \n" // r00 r01 r02 r03
"fmla v28.8h, %20.8h, v22.8h \n"
"fmla v30.8h, %17.8h, v22.8h \n"
"fmla v29.8h, %20.8h, v23.8h \n"
"fmla v31.8h, %17.8h, v23.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%5] \n" // r30 r31 r32 r33
"fmla v28.8h, %12.8h, v12.8h \n"
"fmla v30.8h, %18.8h, v24.8h \n"
"fmla v29.8h, %12.8h, v13.8h \n"
"fmla v31.8h, %18.8h, v25.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v30.8h, %19.8h, v25.8h \n"
"fmla v29.8h, %13.8h, v14.8h \n"
"fmla v31.8h, %19.8h, v26.8h \n"
"fmla v28.8h, %14.8h, v14.8h \n"
"fmla v30.8h, %20.8h, v26.8h \n"
"fmla v29.8h, %14.8h, v15.8h \n"
"fmla v31.8h, %20.8h, v27.8h \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"add %4, %4, #32 \n"
"add %5, %5, #32 \n"
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
"st1 {v30.8h, v31.8h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v15.8h, v16.8h, v17.8h}, [%3] \n" // r10 r11 r12
"mov v28.16b, %21.16b \n" // sum00
"mov v30.16b, %21.16b \n" // sum10
"fmla v28.8h, %15.8h, v15.8h \n"
"fmla v30.8h, %12.8h, v15.8h \n"
"fmla v28.8h, %16.8h, v16.8h \n"
"fmla v30.8h, %13.8h, v16.8h \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v18.8h, v19.8h, v20.8h}, [%4] \n" // r20 r21 r22
"fmla v28.8h, %17.8h, v17.8h \n"
"fmla v30.8h, %14.8h, v17.8h \n"
"fmla v28.8h, %18.8h, v18.8h \n"
"fmla v30.8h, %15.8h, v18.8h \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v12.8h, v13.8h, v14.8h}, [%2] \n" // r00 r01 r02
"fmla v28.8h, %19.8h, v19.8h \n"
"fmla v30.8h, %16.8h, v19.8h \n"
"fmla v28.8h, %20.8h, v20.8h \n"
"fmla v30.8h, %17.8h, v20.8h \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v21.8h, v22.8h, v23.8h}, [%5] \n" // r30 r31 r32
"fmla v28.8h, %12.8h, v12.8h \n"
"fmla v30.8h, %18.8h, v21.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v30.8h, %19.8h, v22.8h \n"
"fmla v28.8h, %14.8h, v14.8h \n"
"fmla v30.8h, %20.8h, v23.8h \n"
"add %2, %2, #16 \n"
"add %3, %3, #16 \n"
"add %4, %4, #16 \n"
"add %5, %5, #16 \n"
"st1 {v28.8h}, [%0], #16 \n"
"st1 {v30.8h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v30");
}
r0 += 2 * 8 + w * 8;
r1 += 2 * 8 + w * 8;
r2 += 2 * 8 + w * 8;
r3 += 2 * 8 + w * 8;
outptr0 += outw * 8;
outptr1 += outw * 8;
}
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"mov v30.16b, %17.16b \n" // sum02
"mov v31.16b, %17.16b \n" // sum03
"fmla v28.8h, %8.8h, v12.8h \n"
"fmla v29.8h, %8.8h, v13.8h \n"
"fmla v30.8h, %8.8h, v14.8h \n"
"fmla v31.8h, %8.8h, v15.8h \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.8h, v17.8h}, [%1] \n" // r04 r05
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v29.8h, %9.8h, v14.8h \n"
"fmla v30.8h, %9.8h, v15.8h \n"
"fmla v31.8h, %9.8h, v16.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, %10.8h, v14.8h \n"
"fmla v29.8h, %10.8h, v15.8h \n"
"fmla v30.8h, %10.8h, v16.8h \n"
"fmla v31.8h, %10.8h, v17.8h \n"
"fmla v28.8h, %11.8h, v18.8h \n"
"fmla v29.8h, %11.8h, v19.8h \n"
"fmla v30.8h, %11.8h, v20.8h \n"
"fmla v31.8h, %11.8h, v21.8h \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v22.8h, v23.8h}, [%2] \n" // r14 r15
"fmla v28.8h, %12.8h, v19.8h \n"
"fmla v29.8h, %12.8h, v20.8h \n"
"fmla v30.8h, %12.8h, v21.8h \n"
"fmla v31.8h, %12.8h, v22.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, %13.8h, v20.8h \n"
"fmla v29.8h, %13.8h, v21.8h \n"
"fmla v30.8h, %13.8h, v22.8h \n"
"fmla v31.8h, %13.8h, v23.8h \n"
"fmla v28.8h, %14.8h, v12.8h \n"
"fmla v29.8h, %14.8h, v13.8h \n"
"fmla v30.8h, %14.8h, v14.8h \n"
"fmla v31.8h, %14.8h, v15.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.8h, v17.8h}, [%3] \n" // r24 r25
"fmla v28.8h, %15.8h, v13.8h \n"
"fmla v29.8h, %15.8h, v14.8h \n"
"fmla v30.8h, %15.8h, v15.8h \n"
"fmla v31.8h, %15.8h, v16.8h \n"
"fmla v28.8h, %16.8h, v14.8h \n"
"fmla v29.8h, %16.8h, v15.8h \n"
"fmla v30.8h, %16.8h, v16.8h \n"
"fmla v31.8h, %16.8h, v17.8h \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1] \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2] \n" // r10 r11 r12 r13
"fmla v28.8h, %8.8h, v12.8h \n"
"fmla v29.8h, %8.8h, v13.8h \n"
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v29.8h, %9.8h, v14.8h \n"
"fmla v28.8h, %10.8h, v14.8h \n"
"fmla v29.8h, %10.8h, v15.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%3] \n" // r20 r21 r22 r23
"fmla v28.8h, %11.8h, v16.8h \n"
"fmla v29.8h, %11.8h, v17.8h \n"
"fmla v28.8h, %12.8h, v17.8h \n"
"fmla v29.8h, %12.8h, v18.8h \n"
"fmla v28.8h, %13.8h, v18.8h \n"
"fmla v29.8h, %13.8h, v19.8h \n"
"fmla v28.8h, %14.8h, v20.8h \n"
"fmla v29.8h, %14.8h, v21.8h \n"
"fmla v28.8h, %15.8h, v21.8h \n"
"fmla v29.8h, %15.8h, v22.8h \n"
"fmla v28.8h, %16.8h, v22.8h \n"
"fmla v29.8h, %16.8h, v23.8h \n"
"add %1, %1, #32 \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v12.8h, v13.8h, v14.8h}, [%1] \n" // r00 r01 r02
"mov v28.16b, %17.16b \n" // sum00
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v15.8h, v16.8h, v17.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, %8.8h, v12.8h \n"
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v28.8h, %10.8h, v14.8h \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v18.8h, v19.8h, v20.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, %11.8h, v15.8h \n"
"fmla v28.8h, %12.8h, v16.8h \n"
"fmla v28.8h, %13.8h, v17.8h \n"
"fmla v28.8h, %14.8h, v18.8h \n"
"fmla v28.8h, %15.8h, v19.8h \n"
"fmla v28.8h, %16.8h, v20.8h \n"
"add %1, %1, #16 \n"
"add %2, %2, #16 \n"
"add %3, %3, #16 \n"
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v28");
}
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}
// Depthwise 3x3 convolution, stride 2, for fp16 storage with fp16 arithmetic
// (fp16sa), channels packed 8 per vector (pack8) for ARM NEON.
// Each group g convolves one 8-lane input channel slab with its own 3x3
// kernel (nine 8-lane taps) and writes one 8-lane output channel slab.
// Assumes bottom_blob is already padded so that every output pixel reads a
// full 3x3 window.  Groups are processed in parallel.
static void convdw3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // One output row consumes 2*outw input positions (stride 2); tailstep
    // skips the unused remainder of the current input row plus one whole
    // extra row, because the window advances two rows per output row.
    // Everything is scaled by 8 for the pack8 layout.
    const int tailstep = (w - 2 * outw + w) * 8;

    const __fp16* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-group bias vector, or zeros when no bias term is given
        float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);

        const __fp16* k0 = kernel.row<const __fp16>(g);

        __fp16* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // three consecutive input rows of the 3x3 window
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);

        // the nine 8-lane kernel taps, kept in registers for the whole group
        float16x8_t _k00 = vld1q_f16(k0);
        float16x8_t _k01 = vld1q_f16(k0 + 8);
        float16x8_t _k02 = vld1q_f16(k0 + 16);
        float16x8_t _k10 = vld1q_f16(k0 + 24);
        float16x8_t _k11 = vld1q_f16(k0 + 32);
        float16x8_t _k12 = vld1q_f16(k0 + 40);
        float16x8_t _k20 = vld1q_f16(k0 + 48);
        float16x8_t _k21 = vld1q_f16(k0 + 56);
        float16x8_t _k22 = vld1q_f16(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: 4 output pixels per iteration; neighbouring outputs
            // share loaded input vectors (output j uses inputs 2j..2j+2)
            for (; j + 3 < outw; j += 4)
            {
                float16x8_t _sum0 = _bias0;
                float16x8_t _sum1 = _bias0;
                float16x8_t _sum2 = _bias0;
                float16x8_t _sum3 = _bias0;

                float16x8_t _r00 = vld1q_f16(r0);
                float16x8_t _r01 = vld1q_f16(r0 + 8);
                float16x8_t _r02 = vld1q_f16(r0 + 16);
                float16x8_t _r03 = vld1q_f16(r0 + 24);
                float16x8_t _r04 = vld1q_f16(r0 + 32);
                float16x8_t _r05 = vld1q_f16(r0 + 40);
                float16x8_t _r06 = vld1q_f16(r0 + 48);
                float16x8_t _r07 = vld1q_f16(r0 + 56);
                float16x8_t _r08 = vld1q_f16(r0 + 64);
                float16x8_t _r10 = vld1q_f16(r1);
                float16x8_t _r11 = vld1q_f16(r1 + 8);
                float16x8_t _r12 = vld1q_f16(r1 + 16);
                float16x8_t _r13 = vld1q_f16(r1 + 24);
                float16x8_t _r14 = vld1q_f16(r1 + 32);
                float16x8_t _r15 = vld1q_f16(r1 + 40);
                float16x8_t _r16 = vld1q_f16(r1 + 48);
                float16x8_t _r17 = vld1q_f16(r1 + 56);
                float16x8_t _r18 = vld1q_f16(r1 + 64);
                float16x8_t _r20 = vld1q_f16(r2);
                float16x8_t _r21 = vld1q_f16(r2 + 8);
                float16x8_t _r22 = vld1q_f16(r2 + 16);
                float16x8_t _r23 = vld1q_f16(r2 + 24);
                float16x8_t _r24 = vld1q_f16(r2 + 32);
                float16x8_t _r25 = vld1q_f16(r2 + 40);
                float16x8_t _r26 = vld1q_f16(r2 + 48);
                float16x8_t _r27 = vld1q_f16(r2 + 56);
                float16x8_t _r28 = vld1q_f16(r2 + 64);

                _sum0 = vfmaq_f16(_sum0, _k00, _r00);
                _sum0 = vfmaq_f16(_sum0, _k01, _r01);
                _sum0 = vfmaq_f16(_sum0, _k02, _r02);
                _sum0 = vfmaq_f16(_sum0, _k10, _r10);
                _sum0 = vfmaq_f16(_sum0, _k11, _r11);
                _sum0 = vfmaq_f16(_sum0, _k12, _r12);
                _sum0 = vfmaq_f16(_sum0, _k20, _r20);
                _sum0 = vfmaq_f16(_sum0, _k21, _r21);
                _sum0 = vfmaq_f16(_sum0, _k22, _r22);
                _sum1 = vfmaq_f16(_sum1, _k00, _r02);
                _sum1 = vfmaq_f16(_sum1, _k01, _r03);
                _sum1 = vfmaq_f16(_sum1, _k02, _r04);
                _sum1 = vfmaq_f16(_sum1, _k10, _r12);
                _sum1 = vfmaq_f16(_sum1, _k11, _r13);
                _sum1 = vfmaq_f16(_sum1, _k12, _r14);
                _sum1 = vfmaq_f16(_sum1, _k20, _r22);
                _sum1 = vfmaq_f16(_sum1, _k21, _r23);
                _sum1 = vfmaq_f16(_sum1, _k22, _r24);
                _sum2 = vfmaq_f16(_sum2, _k00, _r04);
                _sum2 = vfmaq_f16(_sum2, _k01, _r05);
                _sum2 = vfmaq_f16(_sum2, _k02, _r06);
                _sum2 = vfmaq_f16(_sum2, _k10, _r14);
                _sum2 = vfmaq_f16(_sum2, _k11, _r15);
                _sum2 = vfmaq_f16(_sum2, _k12, _r16);
                _sum2 = vfmaq_f16(_sum2, _k20, _r24);
                _sum2 = vfmaq_f16(_sum2, _k21, _r25);
                _sum2 = vfmaq_f16(_sum2, _k22, _r26);
                _sum3 = vfmaq_f16(_sum3, _k00, _r06);
                _sum3 = vfmaq_f16(_sum3, _k01, _r07);
                _sum3 = vfmaq_f16(_sum3, _k02, _r08);
                _sum3 = vfmaq_f16(_sum3, _k10, _r16);
                _sum3 = vfmaq_f16(_sum3, _k11, _r17);
                _sum3 = vfmaq_f16(_sum3, _k12, _r18);
                _sum3 = vfmaq_f16(_sum3, _k20, _r26);
                _sum3 = vfmaq_f16(_sum3, _k21, _r27);
                _sum3 = vfmaq_f16(_sum3, _k22, _r28);

                vst1q_f16(outptr0, _sum0);
                vst1q_f16(outptr0 + 8, _sum1);
                vst1q_f16(outptr0 + 16, _sum2);
                vst1q_f16(outptr0 + 24, _sum3);

                // stride 2: 4 outputs consume 8 input positions per row
                r0 += 8 * 8;
                r1 += 8 * 8;
                r2 += 8 * 8;
                outptr0 += 32;
            }
            // 2 output pixels per iteration
            for (; j + 1 < outw; j += 2)
            {
                float16x8_t _sum0 = _bias0;
                float16x8_t _sum1 = _bias0;

                float16x8_t _r00 = vld1q_f16(r0);
                float16x8_t _r01 = vld1q_f16(r0 + 8);
                float16x8_t _r02 = vld1q_f16(r0 + 16);
                float16x8_t _r03 = vld1q_f16(r0 + 24);
                float16x8_t _r04 = vld1q_f16(r0 + 32);
                float16x8_t _r10 = vld1q_f16(r1);
                float16x8_t _r11 = vld1q_f16(r1 + 8);
                float16x8_t _r12 = vld1q_f16(r1 + 16);
                float16x8_t _r13 = vld1q_f16(r1 + 24);
                float16x8_t _r14 = vld1q_f16(r1 + 32);
                float16x8_t _r20 = vld1q_f16(r2);
                float16x8_t _r21 = vld1q_f16(r2 + 8);
                float16x8_t _r22 = vld1q_f16(r2 + 16);
                float16x8_t _r23 = vld1q_f16(r2 + 24);
                float16x8_t _r24 = vld1q_f16(r2 + 32);

                _sum0 = vfmaq_f16(_sum0, _k00, _r00);
                _sum0 = vfmaq_f16(_sum0, _k01, _r01);
                _sum0 = vfmaq_f16(_sum0, _k02, _r02);
                _sum0 = vfmaq_f16(_sum0, _k10, _r10);
                _sum0 = vfmaq_f16(_sum0, _k11, _r11);
                _sum0 = vfmaq_f16(_sum0, _k12, _r12);
                _sum0 = vfmaq_f16(_sum0, _k20, _r20);
                _sum0 = vfmaq_f16(_sum0, _k21, _r21);
                _sum0 = vfmaq_f16(_sum0, _k22, _r22);
                _sum1 = vfmaq_f16(_sum1, _k00, _r02);
                _sum1 = vfmaq_f16(_sum1, _k01, _r03);
                _sum1 = vfmaq_f16(_sum1, _k02, _r04);
                _sum1 = vfmaq_f16(_sum1, _k10, _r12);
                _sum1 = vfmaq_f16(_sum1, _k11, _r13);
                _sum1 = vfmaq_f16(_sum1, _k12, _r14);
                _sum1 = vfmaq_f16(_sum1, _k20, _r22);
                _sum1 = vfmaq_f16(_sum1, _k21, _r23);
                _sum1 = vfmaq_f16(_sum1, _k22, _r24);

                vst1q_f16(outptr0, _sum0);
                vst1q_f16(outptr0 + 8, _sum1);

                r0 += 4 * 8;
                r1 += 4 * 8;
                r2 += 4 * 8;
                outptr0 += 16;
            }
            // remainder: one output pixel at a time
            for (; j < outw; j++)
            {
                float16x8_t _sum0 = _bias0;

                float16x8_t _r00 = vld1q_f16(r0);
                float16x8_t _r01 = vld1q_f16(r0 + 8);
                float16x8_t _r02 = vld1q_f16(r0 + 16);
                float16x8_t _r10 = vld1q_f16(r1);
                float16x8_t _r11 = vld1q_f16(r1 + 8);
                float16x8_t _r12 = vld1q_f16(r1 + 16);
                float16x8_t _r20 = vld1q_f16(r2);
                float16x8_t _r21 = vld1q_f16(r2 + 8);
                float16x8_t _r22 = vld1q_f16(r2 + 16);

                _sum0 = vfmaq_f16(_sum0, _k00, _r00);
                _sum0 = vfmaq_f16(_sum0, _k01, _r01);
                _sum0 = vfmaq_f16(_sum0, _k02, _r02);
                _sum0 = vfmaq_f16(_sum0, _k10, _r10);
                _sum0 = vfmaq_f16(_sum0, _k11, _r11);
                _sum0 = vfmaq_f16(_sum0, _k12, _r12);
                _sum0 = vfmaq_f16(_sum0, _k20, _r20);
                _sum0 = vfmaq_f16(_sum0, _k21, _r21);
                _sum0 = vfmaq_f16(_sum0, _k22, _r22);

                vst1q_f16(outptr0, _sum0);

                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }

            // advance the three row pointers to the start of the next
            // window row pair (see tailstep computation above)
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
GB_unop__identity_fc64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_fp64
// op(A') function: GB_unop_tran__identity_fc64_fp64
// C type: GxB_FC64_t
// A type: double
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])): apply the IDENTITY unary op entrywise,
// typecasting each double entry to GxB_FC64_t with zero imaginary part.
// NOTE: this file is auto-generated (see header); changes belong in the
// generator template, not here.
GrB_Info GB_unop_apply__identity_fc64_fp64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a bulk memcpy suffices
        // (compiled out here, since fp64 -> fc64 requires a cast)
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose the double matrix A, typecast to GxB_FC64_t,
// and apply the IDENTITY op.  The actual transpose kernel lives in the shared
// template GB_unop_transpose.c, specialized via the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fc64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,       // per-thread workspaces
    const int64_t *GB_RESTRICT A_slice,     // how A is partitioned over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // textual inclusion: the template expands with this file's macros
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
atomic-21.c | /* { dg-do compile } */
/* { dg-additional-options "-fdump-tree-original" } */
/* { dg-final { scan-tree-dump-times "omp atomic release" 4 "original" } } */
/* { dg-final { scan-tree-dump-times "omp atomic read acquire" 1 "original" } } */
/* { dg-final { scan-tree-dump-times "omp atomic capture acq_rel" 1 "original" } } */
int i, j, k, l, m, n;
/* Atomic update with an explicit memory-order clause; contributes one of the
   four "omp atomic release" matches checked by the dg-final scans above.  */
void
foo ()
{
#pragma omp atomic release
  i = i + 1;
}
#pragma omp requires atomic_default_mem_order (acq_rel)
/* The requires directive above sets atomic_default_mem_order (acq_rel).
   Per the dg-final scans at the top of this file, the unclause'd atomics
   below are expected to lower as: update/write -> release, read -> acquire,
   capture -> acq_rel.  */
void
bar ()
{
  int v;
  /* implicit update: expected "omp atomic release" in the dump  */
#pragma omp atomic
  j = j + 1;
  /* explicit update form, same expected order  */
#pragma omp atomic update
  k = k + 1;
  /* expected "omp atomic read acquire"  */
#pragma omp atomic read
  v = l;
  /* expected "omp atomic release"  */
#pragma omp atomic write
  m = v;
  /* expected "omp atomic capture acq_rel"  */
#pragma omp atomic capture
  v = n = n + 1;
}
|
exp4_omp_v0.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
/* Fills a 7x7 table in parallel (one second of simulated work per cell),
   then prints it row by row.  Returns 0.

   Fix: the original used a bare "#pragma omp parallel" around the whole
   loop, so (a) every thread redundantly executed ALL n*m iterations, and
   (b) the shared variables i, j and temp were written concurrently by all
   threads — a data race with undefined results.  The loop is now a
   work-sharing "parallel for" (the loop index is privatized automatically)
   with per-thread private scratch variables.  */
int main()
{
    int i, j, n, m, temp, a[100][100];
    n = m = 7;

    /* Each iteration writes a distinct a[temp][j], so iterations are
       independent; j and temp are per-iteration scratch and must be
       private to each thread.  */
    #pragma omp parallel for private(j, temp)
    for (i = 0; i <= n * m - 1; i++) {
        temp = i / m + 1;       /* 1-based row index */
        j = i % m + 1;          /* 1-based column index */
        sleep(1);               /* simulated work, shows parallel speedup */
        a[temp][j] = temp + 100 * (j - 1);
    }

    /* serial print-out, same cell order as the fill loop */
    for (i = 0; i <= n * m - 1; i++) {
        temp = i / m + 1;
        j = i % m + 1;
        if (i % m == 0) printf("\n");
        printf("%d\t", a[temp][j]);
    }
    printf("\n");
    return 0;
}
|
Mrpt.h | #ifndef CPP_MRPT_H_
#define CPP_MRPT_H_
#include <algorithm>
#include <cmath>
#include <functional>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/SparseCore>
/**
 * Plain record of the build/tuning parameters of an Mrpt index, as returned
 * by Mrpt::parameters().  Fields beyond n_trees and depth are only filled in
 * for autotuned indices.
 */
struct Mrpt_Parameters {
  int n_trees = 0; /**< Number of trees in the index. */
  int depth = 0; /**< Depth of the trees in the index. */
  int k = 0; /**< Number of nearest neighbors searched for (if the index is autotuned; otherwise 0). */
  int votes = 0; /**< Optimal vote threshold (if the index is autotuned and the target recall is set; otherwise 0). */
  double estimated_qtime = 0.0; /**< Estimated query time (if the index is autotuned and the target recall is set; otherwise 0.0). */
  double estimated_recall = 0.0; /**< Estimated recall (if the index is autotuned and the target recall is set; otherwise 0.0). */
};
class Mrpt {
public:
/** @name Constructors
* The constructor does not actually build the index. The building is done
* by the function grow() which has to be called before queries can be made.
* There are two different versions of the constructor which differ only
* by the type of the input data. The first version takes the data set
* as `Ref` to `MatrixXf`, which means that the argument
* can be either `MatrixXf` or `Map<MatrixXf>` (also certain blocks of `MatrixXf`
* may be accepted, see [Eigen::Ref](https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html)
* for more information). The second version takes a float
* pointer to an array containing the data set, and the dimension and
* the sample size of the data. There are also corresponding versions
* of all the member functions which take input data. In all cases the data
* is assumed to be stored in column-major order such that each data point
* is stored contiguously in memory. In all cases no copies are made of
* the original data matrix. */
/**
 * Construct an index over an Eigen matrix.  Only a view (Eigen::Map) of the
 * caller's data is stored — no copy is made, so X_ must outlive this object.
 *
 * @param X_ Eigen ref to the data set, stored as one data point per column
 */
Mrpt(const Eigen::Ref<const Eigen::MatrixXf> &X_) :
    X(Eigen::Map<const Eigen::MatrixXf>(X_.data(), X_.rows(), X_.cols())),
    n_samples(X_.cols()),   // one sample per column
    dim(X_.rows()) {}       // one dimension per row
/**
 * Construct an index over a raw float array, interpreted column-major
 * (each data point contiguous).  Only a view is stored — X_ must outlive
 * this object.
 *
 * @param X_ a float array containing the data set with each data point
 * stored contiguously in memory
 * @param dim_ dimension of the data
 * @param n_samples_ number of data points
 */
Mrpt(const float *X_, int dim_, int n_samples_) :
    X(Eigen::Map<const Eigen::MatrixXf>(X_, dim_, n_samples_)),
    n_samples(n_samples_),
    dim(dim_) {}
/**@}*/
/** @name Normal index building.
* Build a normal (not autotuned) index.
*/
/**
* Build a normal index.
*
* @param n_trees_ number of trees to be grown
* @param depth_ depth of the trees; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ is the number
* of data points
* @param density_ expected proportion of non-zero components in the
* random vectors; on the interval \f$(0,1]\f$; default value sets density to
* \f$ 1 / \sqrt{d} \f$, where \f$d\f$ is the dimension of the data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
*/
void grow(int n_trees_, int depth_, float density_ = -1.0, int seed = 0) {
if (!empty()) {
throw std::logic_error("The index has already been grown.");
}
if (n_trees_ <= 0) {
throw std::out_of_range("The number of trees must be positive.");
}
if (depth_ <= 0 || depth_ > std::log2(n_samples)) {
throw std::out_of_range("The depth must belong to the set {1, ... , log2(n)}.");
}
if (density_ < -1.0001 || density_ > 1.0001 || (density_ > -0.9999 && density_ < -0.0001)) {
throw std::out_of_range("The density must be on the interval (0,1].");
}
n_trees = n_trees_;
depth = depth_;
n_pool = n_trees_ * depth_;
n_array = 1 << (depth_ + 1);
if (density_ < 0) {
density = 1.0 / std::sqrt(dim);
} else {
density = density_;
}
density < 1 ? build_sparse_random_matrix(sparse_random_matrix, n_pool, dim, density, seed) :
build_dense_random_matrix(dense_random_matrix, n_pool, dim, seed);
split_points = Eigen::MatrixXf(n_array, n_trees);
tree_leaves = std::vector<std::vector<int>>(n_trees);
count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
leaf_first_indices = leaf_first_indices_all[depth];
#pragma omp parallel for
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
Eigen::MatrixXf tree_projections;
if (density < 1)
tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * X;
else
tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * X;
tree_leaves[n_tree] = std::vector<int>(n_samples);
std::vector<int> &indices = tree_leaves[n_tree];
std::iota(indices.begin(), indices.end(), 0);
grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections);
}
}
/**@}*/
/** @name Autotuned index building
* Builds an index by autotuning such that the parameters giving the fastest
* query time at the target recall level are found. If the target recall level
* is not reached at all, then an index giving the highest recall level
* is built. The parameters() function can be used to retrieve these optimal
* parameter values and the estimated query time and the estimated recall.
* There is a version which uses a separate set of test queries (`grow`),
* and a version which samples a test set from the data set (`grow_autotune`).
*/
/**
 * Build an autotuned index, using a separate set of test queries to find the
 * parameters giving the fastest query time at the target recall level.
 *
 * @param target_recall target recall level, on [0,1]
 * @param Q Eigen ref to the test queries (col = data point, row = dimension)
 * @param k_ number of nearest neighbors searched for
 * @param trees_max number of trees grown; -1 (default) = min(sqrt(n), 1000)
 * @param depth_max maximum candidate tree depth, in {1, ..., floor(log2(n))};
 * -1 (default) = log2(n) - 4
 * @param depth_min_ minimum candidate tree depth; -1 (default) =
 * max(floor(log2(n)) - 11, 5)
 * @param votes_max_ maximum candidate vote threshold; -1 (default) =
 * max(trees_max / 10, min(10, trees_max))
 * @param density expected proportion of non-zero components in the random
 * vectors, on (0,1]; -1.0 (default) = 1/sqrt(d)
 * @param seed rng seed; 0 (default) seeds from std::random_device
 */
void grow(double target_recall, const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1,
          int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1,
          float density = -1.0, int seed = 0) {
  // epsilon tolerance at both ends; written so NaN is treated as in range,
  // matching the original comparison semantics
  const bool recall_in_range =
      !(target_recall < 0.0 - epsilon) && !(target_recall > 1.0 + epsilon);
  if (!recall_in_range) {
    throw std::out_of_range("Target recall must be on the interval [0,1].");
  }
  // grow the full candidate index, then prune to the target recall
  grow(Q, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed);
  prune(target_recall);
}
/** Build an autotuned index, using a separate float-array set of test
 * queries to find the parameters giving the fastest query time at the
 * target recall level.
 *
 * @param target_recall target recall level, on [0,1]
 * @param Q float array containing the test queries
 * @param n_test number of test queries
 * @param k_ number of nearest neighbors searched for
 * @param trees_max number of trees grown; -1 (default) = min(sqrt(n), 1000)
 * @param depth_max maximum candidate tree depth, in {1, ..., floor(log2(n))};
 * -1 (default) = log2(n) - 4
 * @param depth_min_ minimum candidate tree depth; -1 (default) =
 * max(floor(log2(n)) - 11, 5)
 * @param votes_max_ maximum candidate vote threshold; -1 (default) =
 * max(trees_max / 10, min(10, trees_max))
 * @param density expected proportion of non-zero components in the random
 * vectors, on (0,1]; -1.0 (default) = 1/sqrt(d)
 * @param seed rng seed; 0 (default) seeds from std::random_device
 * @param indices_test used internally by the version that samples its own
 * test set; leave empty
 */
void grow(double target_recall, const float *Q, int n_test, int k_, int trees_max = -1,
          int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1,
          float density = -1.0, int seed = 0, const std::vector<int> &indices_test = {}) {
  // epsilon tolerance at both ends; NaN is treated as in range, matching the
  // original comparison semantics
  const bool recall_in_range =
      !(target_recall < 0.0 - epsilon) && !(target_recall > 1.0 + epsilon);
  if (!recall_in_range) {
    throw std::out_of_range("Target recall must be on the interval [0,1].");
  }
  // grow the full candidate index, then prune to the target recall
  grow(Q, n_test, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed, indices_test);
  prune(target_recall);
}
/** Build an autotuned index, sampling the test queries from the training
 * set itself instead of requiring a separate query set.
 *
 * @param target_recall target recall level, on [0,1]
 * @param k_ number of nearest neighbors searched for
 * @param trees_max number of trees grown; -1 (default) = min(sqrt(n), 1000)
 * @param depth_max maximum candidate tree depth, in {1, ..., floor(log2(n))};
 * -1 (default) = log2(n) - 4
 * @param depth_min_ minimum candidate tree depth; -1 (default) =
 * max(floor(log2(n)) - 11, 5)
 * @param votes_max_ maximum candidate vote threshold; -1 (default) =
 * max(trees_max / 10, min(10, trees_max))
 * @param density_ expected proportion of non-zero components in the random
 * vectors, on (0,1]; -1.0 (default) = 1/sqrt(d)
 * @param seed rng seed; 0 (default) seeds from std::random_device
 * @param n_test number of test queries sampled from the training set
 * (capped at the number of data points); must be positive
 */
void grow_autotune(double target_recall, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
                   int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) {
  if (n_test < 1) {
    throw std::out_of_range("Test set size must be > 0.");
  }
  // cannot sample more test points than there are data points
  if (n_test > n_samples) {
    n_test = n_samples;
  }

  // draw the test queries from the training data, then delegate to the
  // test-set overload; the sampled indices are passed along so the exact
  // neighbors can exclude each query point itself
  std::vector<int> test_indices(sample_indices(n_test, seed));
  const Eigen::MatrixXf Q(subset(test_indices));
  grow(target_recall, Q.data(), Q.cols(), k_, trees_max,
       depth_max, depth_min_, votes_max_, density_, seed, test_indices);
}
/**
 * Get the parameters of the index, plus the estimated recall and query time
 * when the index was autotuned with a preset recall level.  For a normal or
 * unpruned-autotuned index, only n_trees, depth (and, when known, k) are
 * meaningful; the remaining fields keep their default value 0.
 *
 * @return parameters of the index
 */
Mrpt_Parameters parameters() const {
  // pruned autotuned index: the stored parameter record is complete
  if (index_type != normal && index_type != autotuned_unpruned) {
    return par;
  }
  // otherwise report only what is known for certain
  Mrpt_Parameters out;
  out.n_trees = n_trees;
  out.depth = depth;
  out.k = par.k;
  return out;
}
/**
 * Query whether this index was autotuned with a preset recall level
 * (i.e. pruned to its optimal parameters).
 *
 * @return true if the index has been autotuned, false otherwise.
 */
bool is_autotuned() const {
  return (index_type == autotuned);
}
/**@}*/
/** @name Autotuned index building without preset recall level
* Build an autotuned index. This version does not require prespecifying
* a target recall level, but an index generated by this function can be used
* to subset different indices with different recall levels. This is done by
* subset(). The function optimal_parameters() can be used to retrieve a
* pareto frontier of optimal parameters. There is a version which uses a
* separate set of test queries (`grow`), and a version which samples a
* test set from the data set (`grow_autotune`).
*/
/**@{*/
/** Build an autotuned index without prespecifying a recall level.  Grows the
 * largest candidate index (trees_max trees of depth depth_max), measures
 * recall and candidate-set size for every (depth, votes, trees) combination
 * on the test queries, fits query-time models, and stores the pareto
 * frontier of optimal parameters; subset() can later carve out an index for
 * any recall level.
 *
 * @param data a float array containing the test queries.
 * @param n_test number of test queries
 * @param k_ number of nearest neighbors searched for
 * @param trees_max number of trees grown; default value -1 sets this to
 * min(sqrt(n), 1000), where n is the number of data points.
 * @param depth_max maximum depth of trees considered when searching for
 * optimal parameters; in the set {1, ..., floor(log2(n))}; default value -1
 * sets this to log2(n) - 4 (but at least depth_min_)
 * @param depth_min_ minimum depth of trees considered; default value -1
 * sets this to max(floor(log2(n)) - 11, 5)
 * @param votes_max_ maximum number of votes considered; default value -1
 * sets this to max(trees_max / 10, min(10, trees_max))
 * @param density_ expected proportion of non-zero components in the random
 * vectors, on (0,1]; default value -1.0 sets this to 1/sqrt(d), where d is
 * the dimension of data
 * @param seed seed given to a rng when generating random vectors;
 * a default value 0 initializes the rng randomly with std::random_device
 * @param indices_test parameter used by the version which uses no
 * separate test set, leave empty.
 **/
void grow(const float *data, int n_test, int k_, int trees_max = -1, int depth_max = -1,
          int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0,
          const std::vector<int> &indices_test = {}) {
  // Resolve the -1 sentinels first, so the validation below checks the
  // values that will actually be used.  Order matters: depth_max's default
  // depends on depth_min_, and votes_max_'s on trees_max.
  if (trees_max == - 1) {
    trees_max = std::min(std::sqrt(n_samples), 1000.0);
  }
  if (depth_min_ == -1) {
    depth_min_ = std::max(static_cast<int>(std::log2(n_samples) - 11), 5);
  }
  if (depth_max == -1) {
    depth_max = std::max(static_cast<int>(std::log2(n_samples) - 4), depth_min_);
  }
  if (votes_max_ == -1) {
    votes_max_ = std::max(trees_max / 10, std::min(trees_max, 10));
  }
  // a density within +-0.0001 of the -1.0 sentinel selects the default
  if (density_ > -1.0001 && density_ < -0.9999) {
    density_ = 1.0 / std::sqrt(dim);
  }
  if (!empty()) {
    throw std::logic_error("The index has already been grown.");
  }
  if (k_ <= 0 || k_ > n_samples) {
    throw std::out_of_range("k_ must belong to the set {1, ..., n}.");
  }
  if (trees_max <= 0) {
    throw std::out_of_range("trees_max must be positive.");
  }
  if (depth_max <= 0 || depth_max > std::log2(n_samples)) {
    throw std::out_of_range("depth_max must belong to the set {1, ... , log2(n)}.");
  }
  if (depth_min_ <= 0 || depth_min_ > depth_max) {
    throw std::out_of_range("depth_min_ must belong to the set {1, ... , depth_max}");
  }
  if (votes_max_ <= 0 || votes_max_ > trees_max) {
    throw std::out_of_range("votes_max_ must belong to the set {1, ... , trees_max}.");
  }
  // NOTE(review): density_ == 0.0 slips through this check although the
  // documented interval is (0,1] — confirm whether that is intentional
  if (density_ < 0.0 || density_ > 1.0001) {
    throw std::out_of_range("The density must be on the interval (0,1].");
  }
  if(n_samples < 101) {
    throw std::out_of_range("Sample size must be at least 101 to autotune an index.");
  }
  depth_min = depth_min_;
  votes_max = votes_max_;
  k = k_;
  const Eigen::Map<const Eigen::MatrixXf> Q(data, dim, n_test);
  // grow the largest candidate index; smaller ones are subsets of it
  grow(trees_max, depth_max, density_, seed);
  // ground-truth k nearest neighbors of each test query, for recall
  Eigen::MatrixXi exact(k, n_test);
  compute_exact(Q, exact, indices_test);
  // accumulate, per depth, a (votes x trees) grid of recall counts and
  // candidate-set sizes over all test queries
  std::vector<Eigen::MatrixXd> recalls(depth_max - depth_min + 1);
  cs_sizes = std::vector<Eigen::MatrixXd>(depth_max - depth_min + 1);
  for (int d = depth_min; d <= depth_max; ++d) {
    recalls[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
    cs_sizes[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
  }
  for (int i = 0; i < n_test; ++i) {
    std::vector<Eigen::MatrixXd> recall_tmp(depth_max - depth_min + 1);
    std::vector<Eigen::MatrixXd> cs_size_tmp(depth_max - depth_min + 1);
    count_elected(Q.col(i), Eigen::Map<Eigen::VectorXi>(exact.data() + i * k, k),
                  votes_max, recall_tmp, cs_size_tmp);
    for (int d = depth_min; d <= depth_max; ++d) {
      recalls[d - depth_min] += recall_tmp[d - depth_min];
      cs_sizes[d - depth_min] += cs_size_tmp[d - depth_min];
    }
  }
  // normalize: recall over k * n_test retrieved/true pairs, candidate-set
  // size averaged over the test queries
  for (int d = depth_min; d <= depth_max; ++d) {
    recalls[d - depth_min] /= (k * n_test);
    cs_sizes[d - depth_min] /= n_test;
  }
  // fit query-time models, then keep only the pareto-optimal parameter sets
  fit_times(Q);
  std::set<Mrpt_Parameters,decltype(is_faster)*> pars = list_parameters(recalls);
  opt_pars = pareto_frontier(pars);
  index_type = autotuned_unpruned;
  par.k = k_;
}
/** Build an autotuned index without prespecifying a recall level.
*
* @param Q Eigen ref to the test queries.
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
 * @param depth_max depth of trees grown; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters on the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components of random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
*/
void grow(const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1, int depth_max = -1,
int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0) {
if (Q.rows() != dim) {
throw std::invalid_argument("Dimensions of the data and the validation set do not match.");
}
grow(Q.data(), Q.cols(), k_, trees_max,
depth_max, depth_min_, votes_max_, density_, seed);
}
/** Build an autotuned index sampling test queries from the training set
* and without prespecifying a recall level.
*
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
* @param depth_max depth of trees grown; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters on the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components of random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
* @param n_test number of test queries sampled from the training set.
*/
void grow_autotune(int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) {
if (n_test < 1) {
throw std::out_of_range("Test set size must be > 0.");
}
n_test = n_test > n_samples ? n_samples : n_test;
std::vector<int> indices_test(sample_indices(n_test, seed));
const Eigen::MatrixXf Q(subset(indices_test));
grow(Q.data(), Q.cols(), k_, trees_max,
depth_max, depth_min_, votes_max_, density_, seed, indices_test);
}
/** Create a new index by copying trees from an autotuned index grown
* without a prespecified recall level. The index is created so that
* it gives a fastest query time at the recall level given as the parameter.
* If this recall level is not met, then it creates an index with a
* highest possible recall level.
*
* @param target_recall target recall level; on the range [0,1]
* @return an autotuned Mrpt index with a recall level at least as high as
* target_recall
*/
  Mrpt subset(double target_recall) const {
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }
    // Build a new index over the same data, then copy in only the trees and
    // depth prescribed by the optimal parameters for the requested recall.
    Mrpt index2(X);
    index2.par = parameters(target_recall);
    int depth_max = depth;  // depth of this (unpruned) index's trees
    index2.n_trees = index2.par.n_trees;
    index2.depth = index2.par.depth;
    index2.votes = index2.par.votes;
    index2.n_pool = index2.depth * index2.n_trees;
    index2.n_array = 1 << (index2.depth + 1);  // size of the implicit-heap node array
    index2.tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2.n_trees);
    index2.leaf_first_indices_all = leaf_first_indices_all;
    index2.density = density;
    index2.k = k;
    index2.split_points = split_points.topLeftCorner(index2.n_array, index2.n_trees);
    index2.leaf_first_indices = leaf_first_indices_all[index2.depth];
    if (index2.density < 1) {
      // Random vectors are stored tree-by-tree, depth_max rows per tree;
      // keep only the first index2.depth rows of each tree's block.
      index2.sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2.n_pool, index2.dim);
      for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
        index2.sparse_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
          sparse_random_matrix.middleRows(n_tree * depth_max, index2.depth);
    } else {
      index2.dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2.n_pool, index2.dim);
      for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
        index2.dense_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
          dense_random_matrix.middleRows(n_tree * depth_max, index2.depth);
    }
    index2.index_type = autotuned;
    return index2;
  }
/** Create a new index by copying trees from an autotuned index grown
* without a prespecified recall level. The index is created so that
* it gives a fastest query time at the recall level given as the parameter.
* If this recall level is not met, then it creates an index with a
* highest possible recall level. This function differs from subset() only
* by the return value.
*
* @param target_recall target recall level; on the range [0,1]
* @return pointer to a dynamically allocated autotuned Mrpt index with
* a recall level at least as high as target_recall
*/
Mrpt *subset_pointer(double target_recall) const {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
Mrpt *index2 = new Mrpt(X);
index2->par = parameters(target_recall);
int depth_max = depth;
index2->n_trees = index2->par.n_trees;
index2->depth = index2->par.depth;
index2->votes = index2->par.votes;
index2->n_pool = index2->depth * index2->n_trees;
index2->n_array = 1 << (index2->depth + 1);
index2->tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2->n_trees);
index2->leaf_first_indices_all = leaf_first_indices_all;
index2->density = density;
index2->k = k;
index2->split_points = split_points.topLeftCorner(index2->n_array, index2->n_trees);
index2->leaf_first_indices = leaf_first_indices_all[index2->depth];
if (index2->density < 1) {
index2->sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2->n_pool, index2->dim);
for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
index2->sparse_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
sparse_random_matrix.middleRows(n_tree * depth_max, index2->depth);
} else {
index2->dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2->n_pool, index2->dim);
for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
index2->dense_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
dense_random_matrix.middleRows(n_tree * depth_max, index2->depth);
}
index2->index_type = autotuned;
return index2;
}
/**
* Return the pareto frontier of optimal parameters for an index which
* is autotuned without setting a recall level. This means that each
* parameter combination in a returned vector is optimal in a sense
* that it is a fastest (measured by query time) parameter combination
* to obtain as least as high recall level that it has.
*
* @return vector of optimal parameters
*/
std::vector<Mrpt_Parameters> optimal_parameters() const {
if (index_type == normal) {
throw std::logic_error("The list of optimal parameters cannot be retrieved for the non-autotuned index.");
}
if (index_type == autotuned) {
throw std::logic_error("The list of optimal parameters cannot be retrieved for the index which has already been subsetted or deleted to the target recall level.");
}
std::vector<Mrpt_Parameters> new_pars;
std::copy(opt_pars.begin(), opt_pars.end(), std::back_inserter(new_pars));
return new_pars;
}
/**@}*/
/** @name Approximate k-nn search
* A query using a non-autotuned index. Finds k approximate nearest neighbors
* from a data set X for a query point q. Because the index is not autotuned,
* k and vote threshold are set manually. The indices of k nearest neighbors
* are written to a buffer out, which has to be preallocated to have at least
* length k. Optionally also Euclidean distances to these k nearest points
* are written to a buffer out_distances. If there are less than k points in
* the candidate set, -1 is written to the remaining locations of the
* output buffers.
*/
/**
* Approximate k-nn search using a normal index.
*
* @param data pointer to an array containing the query point
* @param k number of nearest neighbors searched for
   * @param vote_threshold number of votes required for a query point to be included in the candidate set
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
  void query(const float *data, int k, int vote_threshold, int *out,
             float *out_distances = nullptr, int *out_n_elected = nullptr) const {
    if (k <= 0 || k > n_samples) {
      throw std::out_of_range("k must belong to the set {1, ..., n}.");
    }
    if (vote_threshold <= 0 || vote_threshold > n_trees) {
      throw std::out_of_range("vote_threshold must belong to the set {1, ... , n_trees}.");
    }
    if (empty()) {
      throw std::logic_error("The index must be built before making queries.");
    }
    const Eigen::Map<const Eigen::VectorXf> q(data, dim);
    // Project the query onto the random vectors of all trees with one product.
    Eigen::VectorXf projected_query(n_pool);
    if (density < 1)
      projected_query.noalias() = sparse_random_matrix * q;
    else
      projected_query.noalias() = dense_random_matrix * q;
    std::vector<int> found_leaves(n_trees);
    /*
     * The following loops over all trees, and routes the query to exactly one
     * leaf in each.
     */
    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int idx_tree = 0;  // implicit binary-heap node index; root is 0
      for (int d = 0; d < depth; ++d) {
        const int j = n_tree * depth + d;  // this tree's projection at level d
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
      }
      // Convert the heap index of the reached node into a 0-based leaf ordinal.
      found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
    }
    int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
    Eigen::VectorXi elected(n_trees * max_leaf_size);
    Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
    // count votes: a point joins the candidate set exactly when its vote
    // count first reaches vote_threshold, so each point is elected at most once
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
      int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
      const std::vector<int> &indices = tree_leaves[n_tree];
      for (int i = leaf_begin; i < leaf_end; ++i) {
        int idx = indices[i];
        if (++votes(idx) == vote_threshold)
          elected(n_elected++) = idx;
      }
    }
    if (out_n_elected) {
      *out_n_elected = n_elected;
    }
    // Exact search restricted to the elected candidate set.
    exact_knn(q, k, elected, n_elected, out, out_distances);
  }
/**
* Approximate k-nn search using a normal index.
*
* @param q Eigen ref to the query point
* @param k number of nearest neighbors searched for
* @param vote_threshold number of votes required for a query point to be included in the candidate set
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int vote_threshold, int *out,
float *out_distances = nullptr, int *out_n_elected = nullptr) const {
query(q.data(), k, vote_threshold, out, out_distances, out_n_elected);
}
/**@}*/
/** @name Approximate k-nn search using autotuned index
* Approximate k-nn search using an autotuned index. Finds k approximate
* nearest neighbors from a data set X for a query point q. Because the index
* is autotuned, no parameters other than a query point and an output are
* required: k is preset, and the optimal vote count is used automatically.
* The indices of k nearest neighbors are written to a buffer out, which has
* to be preallocated to have at least length k. Optionally also the Euclidean
* distances to these k nearest points are written to a buffer
* out_distances. If there are less than k points in the candidate set,
* -1 is written to the remaining locations of the output buffers.
*/
/**
* Approximate k-nn search using an autotuned index.
*
* @param q pointer to an array containing the query point
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const float *q, int *out, float *out_distances = nullptr,
int *out_n_elected = nullptr) const {
if (index_type == normal) {
throw std::logic_error("The index is not autotuned: k and vote threshold has to be specified.");
}
if (index_type == autotuned_unpruned) {
throw std::logic_error("The target recall level has to be set before making queries.");
}
query(q, k, votes, out, out_distances, out_n_elected);
}
/**
* Approximate k-nn search using an autotuned index.
*
* @param q Eigen ref to the query point
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int *out, float *out_distances = nullptr,
int *out_n_elected = nullptr) const {
query(q.data(), out, out_distances, out_n_elected);
}
/**@}*/
/** @name Exact k-nn search
* Functions for fast exact k-nn search: find k nearest neighbors for a
* query point q from a data set X_. The indices of k nearest neighbors are
* written to a buffer out, which has to be preallocated to have at least
* length k. Optionally also the Euclidean distances to these k nearest points
* are written to a buffer out_distances. There are both static and member
* versions.
*/
/**
* @param q_data pointer to an array containing the query point
* @param X_data pointer to an array containing the data set
* @param dim dimension of data
* @param n_samples number of points in a data set
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
  static void exact_knn(const float *q_data, const float *X_data, int dim, int n_samples,
                        int k, int *out, float *out_distances = nullptr) {
    // Wrap the raw buffers as Eigen views (no copies).
    const Eigen::Map<const Eigen::MatrixXf> X(X_data, dim, n_samples);
    const Eigen::Map<const Eigen::VectorXf> q(q_data, dim);
    if (k < 1 || k > n_samples) {
      throw std::out_of_range("k must be positive and no greater than the sample size of data X.");
    }
    // Compare squared distances; sqrt is applied only to the k reported values.
    Eigen::VectorXf distances(n_samples);
    #pragma omp parallel for
    for (int i = 0; i < n_samples; ++i)
      distances(i) = (X.col(i) - q).squaredNorm();
    if (k == 1) {
      // Fast path: a single minimum scan instead of a partial sort.
      Eigen::MatrixXf::Index index;
      distances.minCoeff(&index);
      out[0] = index;
      if (out_distances)
        out_distances[0] = std::sqrt(distances(index));
      return;
    }
    // Order only the k smallest indices by distance.
    Eigen::VectorXi idx(n_samples);
    std::iota(idx.data(), idx.data() + n_samples, 0);
    std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_samples,
                      [&distances](int i1, int i2) { return distances(i1) < distances(i2); });
    for (int i = 0; i < k; ++i)
      out[i] = idx(i);
    if (out_distances) {
      for (int i = 0; i < k; ++i)
        out_distances[i] = std::sqrt(distances(idx(i)));
    }
  }
/**
* @param q Eigen ref to a query point
* @param X Eigen ref to a data set
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
static void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q,
const Eigen::Ref<const Eigen::MatrixXf> &X,
int k, int *out, float *out_distances = nullptr) {
Mrpt::exact_knn(q.data(), X.data(), X.rows(), X.cols(), k, out, out_distances);
}
/**
* @param q pointer to an array containing the query point
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
void exact_knn(const float *q, int k, int *out, float *out_distances = nullptr) const {
Mrpt::exact_knn(q, X.data(), dim, n_samples, k, out, out_distances);
}
/**
* @param q pointer to an array containing the query point
* @param k number of points searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int *out,
float *out_distances = nullptr) const {
Mrpt::exact_knn(q.data(), X.data(), dim, n_samples, k, out, out_distances);
}
/**@}*/
/** @name Utility functions
* Saving and loading an index and checking if it is already constructed.
* Saving and loading work for both autotuned and non-autotuned indices, and
* load() retrieves also the optimal parameters found by autotuning.
* The same data set used to build a saved index has to be used to
* construct the index into which it is loaded.
*/
/**
* Saves the index to a file.
*
* @param path - filepath to the output file.
* @return true if saving succeeded, false otherwise.
*/
bool save(const char *path) const {
FILE *fd;
if ((fd = fopen(path, "wb")) == NULL)
return false;
int i = index_type;
fwrite(&i, sizeof(int), 1, fd);
if (index_type == 2) {
write_parameter_list(opt_pars, fd);
}
write_parameters(&par, fd);
fwrite(&n_trees, sizeof(int), 1, fd);
fwrite(&depth, sizeof(int), 1, fd);
fwrite(&density, sizeof(float), 1, fd);
fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd);
// save tree leaves
for (int i = 0; i < n_trees; ++i) {
int sz = tree_leaves[i].size();
fwrite(&sz, sizeof(int), 1, fd);
fwrite(&tree_leaves[i][0], sizeof(int), sz, fd);
}
// save random matrix
if (density < 1) {
int non_zeros = sparse_random_matrix.nonZeros();
fwrite(&non_zeros, sizeof(int), 1, fd);
for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) {
for (Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) {
float val = it.value();
int row = it.row(), col = it.col();
fwrite(&row, sizeof(int), 1, fd);
fwrite(&col, sizeof(int), 1, fd);
fwrite(&val, sizeof(float), 1, fd);
}
}
} else {
fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
}
fclose(fd);
return true;
}
/**
* Loads an index from a file.
*
* @param path filepath to the index file.
* @return true if loading succeeded, false otherwise.
*/
bool load(const char *path) {
FILE *fd;
if ((fd = fopen(path, "rb")) == NULL)
return false;
int i;
fread(&i, sizeof(int), 1, fd);
index_type = static_cast<itype>(i);
if (index_type == autotuned_unpruned) {
read_parameter_list(fd);
}
read_parameters(&par, fd);
fread(&n_trees, sizeof(int), 1, fd);
fread(&depth, sizeof(int), 1, fd);
fread(&density, sizeof(float), 1, fd);
n_pool = n_trees * depth;
n_array = 1 << (depth + 1);
count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
leaf_first_indices = leaf_first_indices_all[depth];
split_points = Eigen::MatrixXf(n_array, n_trees);
fread(split_points.data(), sizeof(float), n_array * n_trees, fd);
// load tree leaves
tree_leaves = std::vector<std::vector<int>>(n_trees);
for (int i = 0; i < n_trees; ++i) {
int sz;
fread(&sz, sizeof(int), 1, fd);
std::vector<int> leaves(sz);
fread(&leaves[0], sizeof(int), sz, fd);
tree_leaves[i] = leaves;
}
// load random matrix
if (density < 1) {
int non_zeros;
fread(&non_zeros, sizeof(int), 1, fd);
sparse_random_matrix = Eigen::SparseMatrix<float>(n_pool, dim);
std::vector<Eigen::Triplet<float>> triplets;
for (int k = 0; k < non_zeros; ++k) {
int row, col;
float val;
fread(&row, sizeof(int), 1, fd);
fread(&col, sizeof(int), 1, fd);
fread(&val, sizeof(float), 1, fd);
triplets.push_back(Eigen::Triplet<float>(row, col, val));
}
sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
sparse_random_matrix.makeCompressed();
} else {
dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_pool, dim);
fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
}
fclose(fd);
k = par.k;
votes = par.votes;
return true;
}
/**
   * Is the index already constructed or not?
*
* @return - is the index empty?
*/
bool empty() const {
return n_trees == 0;
}
/**@}*/
/** @name
* Friend declarations for test fixtures. Tests are located at
* https://github.com/vioshyvo/RP-test.
*/
friend class MrptTest;
friend class UtilityTest;
/**@}*/
private:
/**
* Builds a single random projection tree. The tree is constructed by recursively
* projecting the data on a random vector and splitting into two by the median.
*/
  void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end,
                    int tree_level, int i, int n_tree, const Eigen::MatrixXf &tree_projections) {
    int n = end - begin;              // number of points in this node
    int idx_left = 2 * i + 1;         // children in the implicit binary-heap layout
    int idx_right = idx_left + 1;
    if (tree_level == depth) return;  // reached a leaf: nothing more to split
    // Partition the points around the median projection value at this level.
    std::nth_element(begin, begin + n / 2, end,
                    [&tree_projections, tree_level] (int i1, int i2) {
                      return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                    });
    auto mid = end - n / 2;
    if (n % 2) {
      // Odd count: the median element itself is the split point.
      split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
    } else {
      // Even count: split halfway between the two middle projection values
      // (largest of the left half and smallest of the right half).
      auto left_it = std::max_element(begin, mid,
                                      [&tree_projections, tree_level] (int i1, int i2) {
                                        return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                                      });
      split_points(i, n_tree) = (tree_projections(tree_level, *mid) +
                                tree_projections(tree_level, *left_it)) / 2.0;
    }
    // Recurse into both halves one level deeper.
    grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
    grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
  }
/**
* Find k nearest neighbors from data for the query point
*/
  void exact_knn(const Eigen::Map<const Eigen::VectorXf> &q, int k, const Eigen::VectorXi &indices,
                 int n_elected, int *out, float *out_distances = nullptr) const {
    // Empty candidate set: fill the output buffers with the -1 sentinel.
    if (!n_elected) {
      for (int i = 0; i < k; ++i)
        out[i] = -1;
      if (out_distances) {
        for (int i = 0; i < k; ++i)
          out_distances[i] = -1;
      }
      return;
    }
    // Squared distances to the candidates only; sqrt is deferred to output.
    Eigen::VectorXf distances(n_elected);
    #pragma omp parallel for
    for (int i = 0; i < n_elected; ++i)
      distances(i) = (X.col(indices(i)) - q).squaredNorm();
    if (k == 1) {
      Eigen::MatrixXf::Index index;
      distances.minCoeff(&index);
      // NOTE(review): n_elected > 0 is guaranteed by the early return above,
      // so these conditionals can never take the -1 branch.
      out[0] = n_elected ? indices(index) : -1;
      if (out_distances)
        out_distances[0] = n_elected ? std::sqrt(distances(index)) : -1;
      return;
    }
    // Order only as many candidates as will actually be reported.
    int n_to_sort = n_elected > k ? k : n_elected;
    Eigen::VectorXi idx(n_elected);
    std::iota(idx.data(), idx.data() + n_elected, 0);
    std::partial_sort(idx.data(), idx.data() + n_to_sort, idx.data() + n_elected,
                      [&distances](int i1, int i2) { return distances(i1) < distances(i2); });
    // Fewer than k candidates: pad the tail of the output with -1.
    for (int i = 0; i < k; ++i)
      out[i] = i < n_elected ? indices(idx(i)) : -1;
    if (out_distances) {
      for (int i = 0; i < k; ++i)
        out_distances[i] = i < n_elected ? std::sqrt(distances(idx(i))) : -1;
    }
  }
  // Permanently shrinks this index in place to the optimal parameters for
  // target_recall: keeps only the first par.n_trees trees truncated to
  // par.depth levels, and discards the now-unused rows of the random matrix.
  void prune(double target_recall) {
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }
    par = parameters(target_recall);
    // No matching parameter combination: leave the index untouched.
    if (!par.n_trees) {
      return;
    }
    int depth_max = depth;  // original depth; indexes the old matrix blocks below
    n_trees = par.n_trees;
    depth = par.depth;
    votes = par.votes;
    n_pool = depth * n_trees;
    n_array = 1 << (depth + 1);
    tree_leaves.resize(n_trees);
    tree_leaves.shrink_to_fit();
    split_points.conservativeResize(n_array, n_trees);
    leaf_first_indices = leaf_first_indices_all[depth];
    if (density < 1) {
      // Random vectors are stored tree-by-tree with depth_max rows per tree;
      // keep only the first `depth` rows of each retained tree's block.
      Eigen::SparseMatrix<float, Eigen::RowMajor> srm_new(n_pool, dim);
      for (int n_tree = 0; n_tree < n_trees; ++n_tree)
        srm_new.middleRows(n_tree * depth, depth) = sparse_random_matrix.middleRows(n_tree * depth_max, depth);
      sparse_random_matrix = srm_new;
    } else {
      Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> drm_new(n_pool, dim);
      for (int n_tree = 0; n_tree < n_trees; ++n_tree)
        drm_new.middleRows(n_tree * depth, depth) = dense_random_matrix.middleRows(n_tree * depth_max, depth);
      dense_random_matrix = drm_new;
    }
    index_type = autotuned;
  }
  // For one test query, accumulates recall counts and candidate-set sizes for
  // every (depth, votes, trees) combination considered by the autotuner.
  // On return, recalls[d - depth_min](v, t) counts true neighbors elected with
  // vote threshold v+1 using t+1 trees at depth d; cs_sizes holds the
  // corresponding candidate-set sizes.
  void count_elected(const Eigen::VectorXf &q, const Eigen::Map<Eigen::VectorXi> &exact, int votes_max,
                     std::vector<Eigen::MatrixXd> &recalls, std::vector<Eigen::MatrixXd> &cs_sizes) const {
    Eigen::VectorXf projected_query(n_pool);
    if (density < 1)
      projected_query.noalias() = sparse_random_matrix * q;
    else
      projected_query.noalias() = dense_random_matrix * q;
    // The caller sized `recalls`, which implicitly fixes the depth range.
    int depth_min = depth - recalls.size() + 1;
    std::vector<std::vector<int>> start_indices(n_trees);
    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      start_indices[n_tree] = std::vector<int>(depth - depth_min + 1);
      int idx_tree = 0;
      // Route the query down the tree, recording the leaf ordinal reached at
      // every intermediate depth in [depth_min, depth].
      for (int d = 0; d < depth; ++d) {
        const int j = n_tree * depth + d;
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
        if (d >= depth_min - 1)
          start_indices[n_tree][d - depth_min + 1] = idx_tree - (1 << (d + 1)) + 1;
      }
    }
    const int *exact_begin = exact.data();
    const int *exact_end = exact.data() + exact.size();
    for (int depth_crnt = depth_min; depth_crnt <= depth; ++depth_crnt) {
      Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
      const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];
      Eigen::MatrixXd recall(votes_max, n_trees);
      Eigen::MatrixXd candidate_set_size(votes_max, n_trees);
      recall.col(0) = Eigen::VectorXd::Zero(votes_max);
      candidate_set_size.col(0) = Eigen::VectorXd::Zero(votes_max);
      // count votes: column t starts from column t-1 so that each column
      // holds the cumulative counts for using the first t+1 trees
      for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
        std::vector<int> &found_leaves = start_indices[n_tree];
        if (n_tree) {
          recall.col(n_tree) = recall.col(n_tree - 1);
          candidate_set_size.col(n_tree) = candidate_set_size.col(n_tree - 1);
        }
        int leaf_begin = leaf_first_indices[found_leaves[depth_crnt - depth_min]];
        int leaf_end = leaf_first_indices[found_leaves[depth_crnt - depth_min] + 1];
        const std::vector<int> &indices = tree_leaves[n_tree];
        for (int i = leaf_begin; i < leaf_end; ++i) {
          int idx = indices[i];
          int v = ++votes(idx);
          if (v <= votes_max) {
            candidate_set_size(v - 1, n_tree)++;
            // The point counts toward recall if it is a true nearest neighbor.
            if (std::find(exact_begin, exact_end, idx) != exact_end)
              recall(v - 1, n_tree)++;
          }
        }
      }
      recalls[depth_crnt - depth_min] = recall;
      cs_sizes[depth_crnt - depth_min] = candidate_set_size;
    }
  }
/**
* Builds a random sparse matrix for use in random projection. The components of
* the matrix are drawn from the distribution
*
* 0 w.p. 1 - a
* N(0, 1) w.p. a
*
* where a = density.
*/
static void build_sparse_random_matrix(Eigen::SparseMatrix<float, Eigen::RowMajor> &sparse_random_matrix,
int n_row, int n_col, float density, int seed = 0) {
sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(n_row, n_col);
std::random_device rd;
int s = seed ? seed : rd();
std::mt19937 gen(s);
std::uniform_real_distribution<float> uni_dist(0, 1);
std::normal_distribution<float> norm_dist(0, 1);
std::vector<Eigen::Triplet<float>> triplets;
for (int j = 0; j < n_row; ++j) {
for (int i = 0; i < n_col; ++i) {
if (uni_dist(gen) > density) continue;
triplets.push_back(Eigen::Triplet<float>(j, i, norm_dist(gen)));
}
}
sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
sparse_random_matrix.makeCompressed();
}
/*
* Builds a random dense matrix for use in random projection. The components of
* the matrix are drawn from the standard normal distribution.
*/
static void build_dense_random_matrix(Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> &dense_random_matrix,
int n_row, int n_col, int seed = 0) {
dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_row, n_col);
std::random_device rd;
int s = seed ? seed : rd();
std::mt19937 gen(s);
std::normal_distribution<float> normal_dist(0, 1);
std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_row * n_col,
[&normal_dist, &gen] { return normal_dist(gen); });
}
  // Computes, for each test query in Q, the indices of its k true nearest
  // neighbors (sorted ascending) into out_exact. When the queries were
  // sampled from the training data, indices_test lists their positions in X
  // and each query point is excluded from its own neighbor search.
  void compute_exact(const Eigen::Map<const Eigen::MatrixXf> &Q, Eigen::MatrixXi &out_exact,
                     const std::vector<int> &indices_test = {}) const {
    int n_test = Q.cols();
    Eigen::VectorXi idx(n_samples);
    std::iota(idx.data(), idx.data() + n_samples, 0);
    for (int i = 0; i < n_test; ++i) {
      if(!indices_test.empty()) {
        // Shift the query's own index out of the first n_samples - 1 slots;
        // only that prefix is searched below, and the last slot is restored
        // after the search.
        std::remove(idx.data(), idx.data() + n_samples, indices_test[i]);
      }
      exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + i * dim, dim), k, idx,
                (indices_test.empty() ? n_samples : n_samples - 1), out_exact.data() + i * k);
      std::sort(out_exact.data() + i * k, out_exact.data() + i * k + k);
      if(!indices_test.empty()) {
        // Restore idx to a full permutation of {0, ..., n_samples - 1}.
        idx[n_samples - 1] = indices_test[i];
      }
    }
  }
static bool is_faster(const Mrpt_Parameters &par1, const Mrpt_Parameters &par2) {
return par1.estimated_qtime < par2.estimated_qtime;
}
  // Routes projected_query to one leaf per tree (using the first n_trees
  // trees truncated to depth_crnt levels), counts per-point votes, and fills
  // `elected` with every point whose vote count reaches vote_threshold,
  // advancing n_elected for the caller.
  void vote(const Eigen::VectorXf &projected_query, int vote_threshold, Eigen::VectorXi &elected,
            int &n_elected, int n_trees, int depth_crnt) {
    std::vector<int> found_leaves(n_trees);
    const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];
    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int idx_tree = 0;
      for (int d = 0; d < depth_crnt; ++d) {
        // Projections are laid out with the member `depth` rows per tree, so
        // the stride uses `depth` even when traversal stops at depth_crnt.
        const int j = n_tree * depth + d;
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
      }
      // Convert the heap node index into a 0-based leaf ordinal at depth_crnt.
      found_leaves[n_tree] = idx_tree - (1 << depth_crnt) + 1;
    }
    int max_leaf_size = n_samples / (1 << depth_crnt) + 1;
    elected = Eigen::VectorXi(n_trees * max_leaf_size);
    Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
    // count votes: a point is elected exactly once, when its vote count
    // first reaches vote_threshold
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
      int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
      const std::vector<int> &indices = tree_leaves[n_tree];
      for (int i = leaf_begin; i < leaf_end; ++i) {
        int idx = indices[i];
        if (++votes(idx) == vote_threshold)
          elected(n_elected++) = idx;
      }
    }
  }
/*
 * Measures single-query projection time over a grid of (depth, #trees)
 * combinations and Theil-Sen-fits a line from the number of random
 * vectors (t * d) to the measured time; returns {intercept, slope}.
 * Side effect: fills exact_x with the recorded candidate-set sizes for
 * the same grid (inputs for the exact-search time model).
 */
std::pair<double,double> fit_projection_times(const Eigen::Map<const Eigen::MatrixXf> &Q,
                                              std::vector<int> &exact_x) {
  std::vector<double> projection_times, projection_x;
  long double idx_sum = 0;
  /* Hand-picked tree counts, extended up to n_trees by generate_x. */
  std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
  generate_x(tested_trees, n_trees, 10, n_trees);
  for (int d = depth_min; d <= depth; ++d) {
    for (int i = 0; i < (int) tested_trees.size(); ++i) {
      int t = tested_trees[i];
      int n_random_vectors = t * d;
      projection_x.push_back(n_random_vectors);
      /* A fresh random matrix is built per configuration — presumably so
         the timed multiply sees uncached data of the right size; confirm. */
      Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_mat;
      Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_mat;
      if (density < 1) {
        build_sparse_random_matrix(sparse_mat, n_random_vectors, dim, density);
      } else {
        build_dense_random_matrix(dense_mat, n_random_vectors, dim);
      }
      /* Time only the projection of one query (column 0 of Q). */
      double start_proj = omp_get_wtime();
      Eigen::VectorXf projected_query(n_random_vectors);
      if (density < 1) {
        projected_query.noalias() = sparse_mat * Q.col(0);
      } else {
        projected_query.noalias() = dense_mat * Q.col(0);
      }
      double end_proj = omp_get_wtime();
      projection_times.push_back(end_proj - start_proj);
      idx_sum += projected_query.norm();
      /* A point gets at most one vote per tree, so thresholds above t
         are unreachable. */
      int votes_index = votes_max < t ? votes_max : t;
      for (int v = 1; v <= votes_index; ++v) {
        int cs_size = get_candidate_set_size(t, d, v);
        if (cs_size > 0) exact_x.push_back(cs_size);
      }
    }
  }
  // use results to ensure that the compiler does not optimize away the timed code.
  projection_x[0] += idx_sum > 1.0 ? 0.0000 : 0.0001;
  return fit_theil_sen(projection_x, projection_times);
}
/*
 * For every depth and vote threshold, measures vote() on randomly chosen
 * test queries over a range of tree counts and Theil-Sen-fits voting
 * time as a function of the number of trees. Returns (and stores in
 * beta_voting) one {threshold -> {intercept, slope}} map per depth.
 */
std::vector<std::map<int,std::pair<double,double>>> fit_voting_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  int n_test = Q.cols();
  std::random_device rd;
  std::mt19937 rng(rd());
  std::uniform_int_distribution<int> uni(0, n_test - 1);
  /* Hand-picked grids, extended up to the configured maxima. */
  std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
  generate_x(tested_trees, n_trees, 10, n_trees);
  std::vector<int> vote_thresholds_x {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  generate_x(vote_thresholds_x, votes_max, 10, votes_max);
  beta_voting = std::vector<std::map<int,std::pair<double,double>>>();
  for (int d = depth_min; d <= depth; ++d) {
    std::map<int,std::pair<double,double>> beta;
    for (const auto &v : vote_thresholds_x) {
      long double idx_sum = 0;
      std::vector<double> voting_times, voting_x;
      for (int i = 0; i < (int) tested_trees.size(); ++i) {
        int t = tested_trees[i];
        int n_el = 0;
        Eigen::VectorXi elected;
        auto ri = uni(rng);  // random test query
        /* Sized for the full index (n_trees * depth): vote() indexes
           component n_tree * depth + d for any tree it visits. */
        Eigen::VectorXf projected_query(n_trees * depth);
        if (density < 1) {
          projected_query.noalias() = sparse_random_matrix * Q.col(ri);
        } else {
          projected_query.noalias() = dense_random_matrix * Q.col(ri);
        }
        double start_voting = omp_get_wtime();
        vote(projected_query, v, elected, n_el, t, d);
        double end_voting = omp_get_wtime();
        voting_times.push_back(end_voting - start_voting);
        voting_x.push_back(t);
        /* note: this inner i shadows the outer loop index */
        for (int i = 0; i < n_el; ++i)
          idx_sum += elected(i);
      }
      /* keep idx_sum live so the timed call is not optimized away */
      voting_x[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
      beta[v] = fit_theil_sen(voting_x, voting_times);
    }
    beta_voting.push_back(beta);
  }
  return beta_voting;
}
/*
 * Appends up to n_tested roughly evenly spaced values (multiples of
 * max_generated / n_tested) to x, skipping values already present, then
 * prunes every entry larger than max_val. Used to extend a hand-picked
 * grid of test values (tree counts, vote thresholds, ...) up to the
 * configured maximum.
 */
static void generate_x(std::vector<int> &x, int max_generated, int n_tested, int max_val) {
  /* Clamp the number of generated points to max_generated. The previous
     code fell back to max_val here, which could make n_tested larger
     than max_generated, so increment became 0 and a spurious 0 entry was
     pushed into x. */
  n_tested = max_generated > n_tested ? n_tested : max_generated;
  if (n_tested > 0) {  // also guards the division below
    int increment = max_generated / n_tested;
    for (int i = 1; i <= n_tested; ++i) {
      if (std::find(x.begin(), x.end(), i * increment) == x.end() && i * increment <= max_generated) {
        x.push_back(i * increment);
      }
    }
  }
  auto end = std::remove_if(x.begin(), x.end(), [max_val](int t) { return t > max_val; });
  x.erase(end, x.end());
}
/*
 * Measures exact_knn on random candidate sets of increasing size
 * (averaged over n_sim draws each) and Theil-Sen-fits search time as a
 * function of candidate-set size; returns {intercept, slope}.
 */
std::pair<double,double> fit_exact_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  /* Hand-picked candidate-set sizes, extended up to n_samples / 20. */
  std::vector<int> s_tested {1,2,5,10,20,35,50,75,100,150,200,300,400,500};
  generate_x(s_tested, n_samples / 20, 20, n_samples);
  int n_test = Q.cols();
  std::vector<double> exact_times;
  long double idx_sum = 0;
  std::random_device rd;
  std::mt19937 rng(rd());
  std::uniform_int_distribution<int> uni(0, n_test - 1);     // random query column
  std::uniform_int_distribution<int> uni2(0, n_samples - 1); // random candidate index
  std::vector<double> ex;
  int n_sim = 20;  // timing repetitions per candidate-set size
  for (int i = 0; i < (int) s_tested.size(); ++i) {
    double mean_exact_time = 0;
    int s_size = s_tested[i];
    ex.push_back(s_size);
    for (int m = 0; m < n_sim; ++m) {
      auto ri = uni(rng);
      /* Candidate set of s_size random indices (duplicates possible). */
      Eigen::VectorXi elected(s_size);
      for (int j = 0; j < elected.size(); ++j)
        elected(j) = uni2(rng);
      double start_exact = omp_get_wtime();
      std::vector<int> res(k);
      exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + ri * dim, dim), k, elected, s_size, &res[0]);
      double end_exact = omp_get_wtime();
      mean_exact_time += (end_exact - start_exact);
      for (int l = 0; l < k; ++l)
        idx_sum += res[l];
    }
    mean_exact_time /= n_sim;
    exact_times.push_back(mean_exact_time);
  }
  /* keep idx_sum live so the timed calls are not optimized away */
  ex[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
  return fit_theil_sen(ex, exact_times);
}
/*
 * Enumerates every (n_trees, depth, votes) combination, attaches the
 * estimated query time and recall from the fitted models, and returns
 * the candidates ordered fastest-first (via is_faster).
 * recalls[d - depth_min](v - 1, t - 1) holds the estimated recall for
 * (t trees, depth d, vote threshold v).
 * Note: the original also filled a local vector of per-depth query-time
 * matrices that was never read; that dead work has been removed.
 */
std::set<Mrpt_Parameters,decltype(is_faster)*> list_parameters(const std::vector<Eigen::MatrixXd> &recalls) {
  std::set<Mrpt_Parameters,decltype(is_faster)*> pars(is_faster);
  for (int d = depth_min; d <= depth; ++d) {
    for (int t = 1; t <= n_trees; ++t) {
      /* A point gets at most one vote per tree, so thresholds above t
         are unreachable. */
      int votes_index = votes_max < t ? votes_max : t;
      for (int v = 1; v <= votes_index; ++v) {
        Mrpt_Parameters p;
        p.n_trees = t;
        p.depth = d;
        p.votes = v;
        p.k = k;
        p.estimated_qtime = get_query_time(t, d, v);
        p.estimated_recall = recalls[d - depth_min](v - 1, t - 1);
        pars.insert(p);
      }
    }
  }
  return pars;
}
/*
 * Keeps only the pareto-optimal parameter combinations: scanning the
 * candidates from fastest to slowest, a combination survives iff its
 * estimated recall beats that of every faster combination. The result
 * is cached in opt_pars and also returned.
 */
std::set<Mrpt_Parameters,decltype(is_faster)*> pareto_frontier(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars) {
  opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
  double recall_to_beat = -1.0;
  for (const auto &candidate : pars) { // compute pareto frontier for query times and recalls
    if (!(candidate.estimated_recall > recall_to_beat))
      continue;
    opt_pars.insert(candidate);
    recall_to_beat = candidate.estimated_recall;
  }
  return opt_pars;
}
/*
 * Fits all three per-stage query-time models (projection, voting, exact
 * search) from timings measured on the test queries Q (one per column).
 * NOTE(review): exact_x filled by fit_projection_times is unused here —
 * fit_exact_times generates its own candidate-set sizes; confirm intent.
 */
void fit_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  std::vector<int> exact_x;
  beta_projection = fit_projection_times(Q, exact_x);
  beta_voting = fit_voting_times(Q);
  beta_exact = fit_exact_times(Q);
}
/*
 * Robust simple linear regression y ~ intercept + slope * x using the
 * Theil-Sen estimator: the slope is the median of the pairwise slopes
 * and the intercept is the median of the residuals y[i] - slope * x[i].
 * Returns {intercept, slope}.
 *
 * Fixes over the previous version: pairs with equal x are skipped (they
 * produced +/-inf or NaN slopes that corrupted the median); each
 * unordered pair is evaluated once instead of twice, which selects the
 * same median element; and an empty slope set (n < 2 or all x equal) no
 * longer dereferences past-the-end (slope falls back to 0).
 */
static std::pair<double,double> fit_theil_sen(const std::vector<double> &x,
                                              const std::vector<double> &y) {
  int n = x.size();
  std::vector<double> slopes;
  slopes.reserve(static_cast<std::size_t>(n) * (n > 0 ? n - 1 : 0) / 2);
  for (int i = 0; i < n; ++i) {
    for (int j = i + 1; j < n; ++j) {
      /* Equal x values would give an infinite/NaN slope; skip the pair. */
      if (x[i] != x[j])
        slopes.push_back((y[j] - y[i]) / (x[j] - x[i]));
    }
  }
  double slope = 0.0;
  if (!slopes.empty()) {
    int n_slopes = slopes.size();
    std::nth_element(slopes.begin(), slopes.begin() + n_slopes / 2, slopes.end());
    slope = *(slopes.begin() + n_slopes / 2);
  }
  std::vector<double> residuals(n);
  for (int i = 0; i < n; ++i)
    residuals[i] = y[i] - slope * x[i];
  std::nth_element(residuals.begin(), residuals.begin() + n / 2, residuals.end());
  double intercept = *(residuals.begin() + n / 2);
  return std::make_pair(intercept, slope);
}
/*
 * Serializes one Mrpt_Parameters record to fd as raw binary, field by
 * field. The exact field order defines the on-disk format and must stay
 * in sync with read_parameters. No-op when fd is null.
 */
void write_parameters(const Mrpt_Parameters *p, FILE *fd) const {
  if (!fd) {
    return;
  }
  fwrite(&p->n_trees, sizeof(int), 1, fd);
  fwrite(&p->depth, sizeof(int), 1, fd);
  fwrite(&p->votes, sizeof(int), 1, fd);
  fwrite(&p->k, sizeof(int), 1, fd);
  fwrite(&p->estimated_qtime, sizeof(double), 1, fd);
  fwrite(&p->estimated_recall, sizeof(double), 1, fd);
}
/*
 * Deserializes one Mrpt_Parameters record from fd; the field order must
 * match write_parameters exactly.
 * NOTE(review): fread return values are unchecked — a truncated file
 * leaves *p partially initialized.
 */
void read_parameters(Mrpt_Parameters *p, FILE *fd) {
  fread(&p->n_trees, sizeof(int), 1, fd);
  fread(&p->depth, sizeof(int), 1, fd);
  fread(&p->votes, sizeof(int), 1, fd);
  fread(&p->k, sizeof(int), 1, fd);
  fread(&p->estimated_qtime, sizeof(double), 1, fd);
  fread(&p->estimated_recall, sizeof(double), 1, fd);
}
/*
 * Serializes a parameter set to fd: an int count followed by one record
 * per element (see write_parameters for the field order). No-op when fd
 * is null.
 */
void write_parameter_list(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars, FILE *fd) const {
  if (!fd) {
    return;
  }
  int par_sz = pars.size();
  fwrite(&par_sz, sizeof(int), 1, fd);
  /* Iterate by const reference; the previous loop copied every element. */
  for (const auto &p : pars)
    write_parameters(&p, fd);
}
/*
 * Reads a parameter set written by write_parameter_list from fd and
 * replaces opt_pars with it. No-op when fd is null or the record count
 * cannot be read.
 */
void read_parameter_list(FILE *fd) {
  if (!fd) {
    return;
  }
  opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
  int par_sz = 0;
  /* Bail out on a short read instead of looping over a garbage count. */
  if (fread(&par_sz, sizeof(int), 1, fd) != 1) {
    return;
  }
  for (int i = 0; i < par_sz; ++i) {
    Mrpt_Parameters p;
    read_parameters(&p, fd);
    opt_pars.insert(p);
  }
}
/*
 * Returns the fastest pareto-optimal parameter combination whose
 * estimated recall reaches target_recall (within epsilon). Falls back to
 * the highest-recall combination when the target is unreachable, and to
 * a default-constructed Mrpt_Parameters when no tuning data exists.
 */
Mrpt_Parameters parameters(double target_recall) const {
  const double threshold = target_recall - epsilon;
  for (const auto &candidate : opt_pars)
    if (candidate.estimated_recall > threshold)
      return candidate;
  return opt_pars.empty() ? Mrpt_Parameters() : *opt_pars.rbegin();
}
/**
 * Computes the leaf sizes of a tree assuming a median split and that
 * when the number of points is odd, the extra point is always assigned
 * to the left branch. Sizes are appended to out_leaf_sizes left-to-right.
 */
static void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
  /* Iterative depth-first walk; the explicit stack replaces the original
     recursion. Children are pushed right-first so the left branch is
     expanded first, preserving the left-to-right leaf order. */
  std::vector<std::pair<int,int>> pending;  // (points, level)
  pending.emplace_back(n, level);
  while (!pending.empty()) {
    const auto node = pending.back();
    pending.pop_back();
    const int points = node.first;
    const int lvl = node.second;
    if (lvl == tree_depth) {
      out_leaf_sizes.push_back(points);
      continue;
    }
    pending.emplace_back(points / 2, lvl + 1);           // right: floor(n/2)
    pending.emplace_back(points - points / 2, lvl + 1);  // left: ceil(n/2)
  }
}
/**
 * Computes indices of the first elements of leaves in a vector containing
 * all the leaves of a tree concatenated. Assumes a median split where the
 * odd extra point always goes to the left branch: indices[i] is the
 * offset of leaf i and the final entry equals n.
 */
static void count_first_leaf_indices(std::vector<int> &indices, int n, int depth) {
  /* Expand the leaf sizes level by level (left child gets ceil(m/2),
     right child floor(m/2)), then prefix-sum them into offsets. This
     inlines the recursive helper the original version called. */
  std::vector<int> sizes{n};
  for (int level = 0; level < depth; ++level) {
    std::vector<int> next;
    next.reserve(sizes.size() * 2);
    for (int m : sizes) {
      next.push_back(m - m / 2);  // left branch
      next.push_back(m / 2);      // right branch
    }
    sizes.swap(next);
  }
  indices.assign(sizes.size() + 1, 0);
  for (std::size_t i = 0; i < sizes.size(); ++i)
    indices[i + 1] = indices[i] + sizes[i];
}
/* Precomputes the first-leaf index table for every depth 0..depth_max,
 * appending one table per depth to indices. */
static void count_first_leaf_indices_all(std::vector<std::vector<int>> &indices, int n, int depth_max) {
  for (int d = 0; d <= depth_max; ++d) {
    indices.emplace_back();
    count_first_leaf_indices(indices.back(), n, d);
  }
}
/* Evaluates a fitted Theil-Sen line at x; beta = {intercept, slope}. */
static double predict_theil_sen(double x, std::pair<double,double> beta) {
  const double intercept = beta.first;
  const double slope = beta.second;
  return intercept + slope * x;
}
/* Candidate-set size recorded during autotuning for (tree trees, depth,
 * v votes); all three arguments are 1-based (depth relative to depth_min). */
double get_candidate_set_size(int tree, int depth, int v) const {
  return cs_sizes[depth - depth_min](v - 1, tree - 1);
}
/* Estimated projection time for n_trees * depth random vectors. The vote
 * threshold v does not influence projection; it is accepted only so the
 * signature matches the other get_*_time helpers. */
double get_projection_time(int n_trees, int depth, int v) const {
  return predict_theil_sen(n_trees * depth, beta_projection);
}
/*
 * Estimated voting time for (n_trees, depth, v): evaluates the per-depth
 * Theil-Sen model fitted for the smallest tested vote threshold >= v,
 * falling back to the largest threshold's model when v exceeds them all.
 * Returns 0 for non-positive v or when no model was fitted for depth.
 */
double get_voting_time(int n_trees, int depth, int v) const {
  const std::map<int,std::pair<double,double>> &beta = beta_voting[depth - depth_min];
  if (v <= 0 || beta.empty()) {
    return 0.0;
  }
  /* std::map is ordered by key, so lower_bound finds the first fitted
     threshold >= v; this replaces the original linear scan. */
  auto it = beta.lower_bound(v);
  if (it != beta.end()) {
    return predict_theil_sen(n_trees, it->second);
  }
  return predict_theil_sen(n_trees, beta.rbegin()->second);
}
/* Estimated exact-search time, driven by the recorded candidate-set size
 * for this (n_trees, depth, v) combination. */
double get_exact_time(int n_trees, int depth, int v) const {
  return predict_theil_sen(get_candidate_set_size(n_trees, depth, v), beta_exact);
}
/* Total estimated query time = projection + voting + exact search. */
double get_query_time(int tree, int depth, int v) const {
  const double projection = get_projection_time(tree, depth, v);
  const double voting = get_voting_time(tree, depth, v);
  const double exact = get_exact_time(tree, depth, v);
  return projection + voting + exact;
}
/*
 * Returns n_test distinct data-point indices drawn uniformly at random
 * from 0..n_samples-1 (a random prefix of a shuffled identity
 * permutation). A seed of 0 (the default) uses std::random_device.
 */
std::vector<int> sample_indices(int n_test, int seed = 0) const {
  std::random_device rd;
  int s = seed ? seed : rd();
  std::mt19937 gen(s);
  std::vector<int> pool(n_samples);
  std::iota(pool.begin(), pool.end(), 0);
  std::shuffle(pool.begin(), pool.end(), gen);
  pool.resize(n_test);
  return pool;
}
/* Gathers the columns of the data matrix X named by indices into a new
 * dim x |indices| matrix (one data point per column). */
Eigen::MatrixXf subset(const std::vector<int> &indices) const {
  const int n_test = indices.size();
  Eigen::MatrixXf Q(dim, n_test);
  for (int i = 0; i < n_test; ++i)
    Q.col(i) = X.col(indices[i]);
  return Q;
}
const Eigen::Map<const Eigen::MatrixXf> X; // the data matrix
Eigen::MatrixXf split_points; // all split points in all trees
std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees
Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees
Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees
std::vector<std::vector<int>> leaf_first_indices_all; // first indices for each level
std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves
const int n_samples; // sample size of data
const int dim; // dimension of data
Mrpt_Parameters par;
int n_trees = 0; // number of RP-trees
int depth = 0; // depth of an RP-tree with median split
float density = -1.0; // expected ratio of non-zero components in a projection matrix
int n_pool = 0; // amount of random vectors needed for all the RP-trees
int n_array = 0; // length of the one RP-tree as array
int votes = 0; // optimal number of votes to use
int k = 0;
enum itype {normal, autotuned, autotuned_unpruned};
itype index_type = normal;
// Member variables used in autotuning:
int depth_min = 0;
int votes_max = 0;
const double epsilon = 0.0001; // error bound for comparisons of recall levels
std::vector<Eigen::MatrixXd> cs_sizes;
std::pair<double,double> beta_projection, beta_exact;
std::vector<std::map<int,std::pair<double,double>>> beta_voting;
std::set<Mrpt_Parameters,decltype(is_faster)*> opt_pars;
};
#endif // CPP_MRPT_H_
|
cc_conv2d.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#include "cc_assert.h"
#include "cc_array.h"
#include "cc_basic.h"
#include "cc_fmap2d.h"
#include "cc_pad2d.h"
#include "cc_tsrmgr.h"
#include "util_list.h"
#include "util_log.h"
#include "cc_conv2d.h"
#include "global_fn_cfg.h"
extern fn_conv2d _conv2d;
/* Standard convolution output size along one axis:
 * floor((i - k + 2p) / s) + 1 for input size i, kernel size k,
 * stride s and padding p. */
cc_ssize cc_conv2d_shape_calc(
	cc_ssize i, cc_ssize k, cc_ssize s, cc_ssize p)
{
	cc_ssize span = i - k + 2 * p;
	return span / s + 1;
}
/*
 * Standard 2-D convolution over a CHW feature map:
 * oup[o] = sum over input channels j of conv(inp[j], kernel[o][j]),
 * optionally followed by a per-channel bias add.
 * inp: (C,H,W) tensor; kernel: (O,I,H,W); bias: optional; s: stride;
 * p: zero-padding width; off: padding offset for cc_pad2d; name: output
 * tensor name (looked up in the tensor manager first when AUTO_TSRMGR
 * is on). Returns the output tensor.
 */
cc_tensor_t *cc_conv2d(const cc_tensor_t *inp,
	const cc_tensor_t *kernel, const cc_tensor_t *bias,
	cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
	cc_uint8 *omp_out_buf = NULL;
	cc_tensor_t *oup = NULL;
	const cc_tensor_t *inp_pad;
	cc_ssize o_ch_size, p_ch_mem_size, o_ch_mem_size,
		k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
	char pad_name[CC_TSR_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
	/* input is 3-D, kernel is 4-D, dtypes match, and the input channel
	 * count equals the kernel's input-channel dimension */
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C]
		- kernel->shape[CC_CONV2D_KERNEL_I]);
#endif
	if (p) {
		/* pad into a separate tensor; freed at the end unless the
		 * tensor manager owns it */
		sprintf(pad_name, "%s%s",
			inp->name, CC_CONV2D_PAD_NAME_SURFFIX);
		inp_pad = cc_pad2d(inp, p, off, pad_name);
	}
	else
		inp_pad = inp;
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		/* the shape formula already accounts for p, so the unpadded
		 * input shape is used here */
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_H],
			kernel->shape[CC_CONV2D_KERNEL_H], s, p);
		shape[CC_CNN2D_SHAPE_W] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_W],
			kernel->shape[CC_CONV2D_KERNEL_W], s, p);
		oup = cc_create(shape, *inp->dtype, name);
	}
	/* per-channel element count and byte sizes */
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	p_ch_mem_size = inp_pad->shape[CC_CNN2D_SHAPE_W] *
		inp_pad->shape[CC_CNN2D_SHAPE_H] *
		cc_dtype_size(*inp->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
	k_mem_size = k_ch_mem_size * kernel->shape[CC_CONV2D_KERNEL_I];
	/* one scratch output channel per OpenMP thread */
	num_omp_threads = 1;
#ifdef ENABLE_OPENMP
	num_omp_threads = omp_get_max_threads();
#endif
	cc_assert_alloc(omp_out_buf =
		(cc_uint8*)malloc(o_ch_mem_size * num_omp_threads));
#ifdef AUTO_TSRMGR
	/* clear a reused output tensor before accumulating into it.
	 * NOTE(review): without AUTO_TSRMGR this relies on cc_create
	 * returning zeroed memory — confirm. */
	memset(oup->data, 0,
		list_getlen(oup->container, CC_TENSOR_DATA));
#endif
	/* convolve each input channel j into the thread's scratch slice,
	 * then accumulate into output channel i; each thread owns distinct
	 * values of i, so the writes to oup do not race */
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, j)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		for (j = 0; j < kernel->shape[CC_CONV2D_KERNEL_I]; ++j)
		{
#ifdef ENABLE_OPENMP
			_conv2d((inp_pad->data + j * p_ch_mem_size),
				omp_out_buf + omp_get_thread_num() * o_ch_mem_size,
				inp_pad->shape[CC_CNN2D_SHAPE_W],
				inp_pad->shape[CC_CNN2D_SHAPE_H],
				s, s, (kernel->data + (k_mem_size * i) +
					k_ch_mem_size * j),
				kernel->shape[CC_CONV2D_KERNEL_W],
				*kernel->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf + omp_get_thread_num() * o_ch_mem_size,
				*oup->dtype);
#else
			_conv2d((inp_pad->data + p_ch_mem_size * j),
				omp_out_buf, inp_pad->shape[CC_CNN2D_SHAPE_W],
				inp_pad->shape[CC_CNN2D_SHAPE_H],
				s, s, (kernel->data + (k_mem_size * i) +
					k_ch_mem_size * j),
				kernel->shape[CC_CONV2D_KERNEL_W],
				*kernel->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i, o_ch_size,
				oup->data + o_ch_mem_size * i, omp_out_buf,
				*oup->dtype);
#endif
		}
	}
	free(omp_out_buf);
#ifndef AUTO_TSRMGR
	if (p)
		cc_free((cc_tensor_t*)inp_pad);
#endif
	if (bias)
		oup = cc_fmap2d_bias(oup, bias, oup->name);
	return oup;
}
/*
 * Depthwise 2-D convolution: each input channel i is convolved with its
 * own kernel slice and written to output channel i — no cross-channel
 * accumulation, hence no scratch buffer is needed.
 * inp: (C,H,W) tensor; kernel: 4-D with one output slice per input
 * channel; bias: optional; s: stride; p: zero-padding; off: padding
 * offset for cc_pad2d; name: output tensor name. Returns the output
 * tensor (with bias applied when bias is non-NULL).
 */
cc_tensor_t *cc_dw_conv2d(cc_tensor_t *inp,
	const cc_tensor_t *kernel, const cc_tensor_t *bias,
	cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
	cc_tensor_t *inp_pad, *oup = NULL;
	cc_ssize o_ch_size, p_ch_mem_size, o_ch_mem_size, k_ch_mem_size, i;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
	char pad_name[CC_TSR_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
	/* depthwise: one kernel output slice per input channel */
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C]
		- kernel->shape[CC_CONV2D_KERNEL_O]);
#endif
	if (p) {
		sprintf(pad_name, "%s%s",
			inp->name, CC_CONV2D_PAD_NAME_SURFFIX);
		inp_pad = cc_pad2d(inp, p, off, pad_name);
	}
	else
		inp_pad = inp;
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		/* the shape formula already accounts for p */
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_H],
			kernel->shape[CC_CONV2D_KERNEL_H], s, p);
		shape[CC_CNN2D_SHAPE_W] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_W],
			kernel->shape[CC_CONV2D_KERNEL_W], s, p);
		oup = cc_create(shape, *inp->dtype, name);
	}
	/* per-channel element count and byte sizes */
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	p_ch_mem_size = inp_pad->shape[CC_CNN2D_SHAPE_W] *
		inp_pad->shape[CC_CNN2D_SHAPE_H] *
		cc_dtype_size(*inp->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
#ifdef AUTO_TSRMGR
	memset(oup->data, 0,
		list_getlen(oup->container, CC_TENSOR_DATA));
#endif
	/* channels are independent: each iteration writes only its own slice */
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		_conv2d((inp_pad->data + i * p_ch_mem_size),
			oup->data + i * o_ch_mem_size,
			inp_pad->shape[CC_CNN2D_SHAPE_W],
			inp_pad->shape[CC_CNN2D_SHAPE_H],
			s, s, kernel->data + (k_ch_mem_size * i),
			kernel->shape[CC_CONV2D_KERNEL_W],
			*kernel->dtype);
	}
	/* Apply the bias (if any), then release the padded copy once; the
	 * previous version duplicated this cleanup in both branches. */
	if (bias)
		oup = cc_fmap2d_bias(oup, bias, oup->name);
#ifndef AUTO_TSRMGR
	if (p)
		cc_free(inp_pad);
#endif
	return oup;
}
/*
 * Pointwise (1x1) 2-D convolution: output channel i is the sum over
 * input channels j of inp[j] scaled by the scalar kernel[i][j], plus an
 * optional per-channel bias. Spatial size is unchanged.
 * NOTE(review): o_ch_mem_size is also used as the input channel stride;
 * that is valid only because input and output share H, W and dtype here.
 */
cc_tensor_t *cc_pw_conv2d(cc_tensor_t *inp, const cc_tensor_t *kernel,
	const cc_tensor_t *bias, const char *name)
{
	cc_uint8 *omp_out_buf = NULL;
	cc_tensor_t *oup = NULL;
	cc_ssize o_ch_size, o_ch_mem_size,
		k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
#ifdef ENABLE_CC_ASSERT
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C]
		- kernel->shape[CC_CONV2D_KERNEL_I]);
#endif
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		/* 1x1 kernel, stride 1: spatial dimensions carry over */
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = inp->shape[CC_CNN2D_SHAPE_H];
		shape[CC_CNN2D_SHAPE_W] = inp->shape[CC_CNN2D_SHAPE_W];
		oup = cc_create(shape, *inp->dtype, name);
	}
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
	k_mem_size = k_ch_mem_size * kernel->shape[CC_CONV2D_KERNEL_I];
	/* one scratch output channel per OpenMP thread */
	num_omp_threads = 1;
#ifdef ENABLE_OPENMP
	num_omp_threads = omp_get_max_threads();
#endif
	cc_assert_alloc(omp_out_buf =
		(cc_uint8*)malloc(o_ch_mem_size * num_omp_threads));
#ifdef AUTO_TSRMGR
	/* clear a reused output tensor before accumulating into it */
	memset(oup->data, 0,
		list_getlen(oup->container, CC_TENSOR_DATA));
#endif
	/* scale channel j by kernel[i][j] into the thread's scratch slice,
	 * then accumulate into output channel i (distinct i per thread, so
	 * the writes to oup do not race) */
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, j)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		for (j = 0; j < kernel->shape[CC_CONV2D_KERNEL_I]; ++j)
		{
#ifdef ENABLE_OPENMP
			cc_array_mul_by(
				omp_out_buf + omp_get_thread_num() * o_ch_mem_size,
				o_ch_size, inp->data + o_ch_mem_size * j,
				kernel->data + k_mem_size * i + k_ch_mem_size * j,
				*oup->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf + omp_get_thread_num() * o_ch_mem_size,
				*oup->dtype);
#else
			cc_array_mul_by(omp_out_buf, o_ch_size,
				inp->data + o_ch_mem_size * j,
				kernel->data + k_mem_size * i + k_ch_mem_size * j,
				*oup->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i, o_ch_size,
				oup->data + o_ch_mem_size * i, omp_out_buf,
				*oup->dtype);
#endif
		}
	}
	free(omp_out_buf);
	if (!bias)
		return oup;
	else
		oup = cc_fmap2d_bias(oup, bias, oup->name);
	return oup;
}
|
taskloop_untied_threadid.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt && !clang
// Clang 10.0 seems ignoring the taskloop's "untied" attribute.
// We mark taskloop + untied with Clang as unsupported so far.
#include "omp_testsuite.h"
#include <string.h>
#include <stdio.h>
/*
 * Verifies untied taskloop behavior on the Argobots-backed OpenMP
 * runtime. For each of NUM_TASKS iterations the task records:
 *   +1 if it is still on the same Argobots ULT after "#pragma omp taskyield"
 *   +2 if it is still on the same OpenMP thread after ABT_thread_yield()
 * The test passes only if every vals[i] ends up exactly 3.
 * Returns 1 on success, 0 on failure (printing the first mismatch).
 */
int test_taskloop_untied_threadid(int num_threads) {
  int vals[NUM_TASKS];
  memset(vals, 0, sizeof(vals));
#pragma omp parallel num_threads(num_threads)
  {
#pragma omp master
    {
      int i;
      /* grainsize(1): exactly one task per iteration */
#pragma omp taskloop grainsize(1) untied
      for (i = 0; i < NUM_TASKS; i++) {
        {
          ABT_thread abt_thread;
          ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread));
          // Context switching in OpenMP.
#pragma omp taskyield
          int omp_thread_id2 = omp_get_thread_num();
          ABT_thread abt_thread2;
          ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2));
          ABT_bool abt_thread_equal;
          ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2,
                                            &abt_thread_equal));
          if (abt_thread_equal == ABT_TRUE) {
            /* same ULT across the OpenMP task yield */
            vals[i] += 1;
          }
          // Context switching in Argobots.
          ABT_EXIT_IF_FAIL(ABT_thread_yield());
          int omp_thread_id3 = omp_get_thread_num();
          if (omp_thread_id2 == omp_thread_id3) {
            // Argobots context switch does not change the thread-task mapping.
            vals[i] += 2;
          }
        }
      }
    }
  }
  int index;
  for (index = 0; index < NUM_TASKS; index++) {
    if (vals[index] != 3) {
      printf("vals[%d] == %d\n", index, vals[index]);
      return 0;
    }
  }
  return 1;
}
/* Runs the taskloop test once for each thread count 1..REPETITIONS and
 * returns the number of failing configurations (0 means success). */
int main() {
  int num_failed = 0;
  int n;
  for (n = 1; n <= REPETITIONS; n++) {
    if (!test_taskloop_untied_threadid(n))
      num_failed++;
  }
  return num_failed;
}
|
ncpar.c | #include <stdio.h>
#include <stdlib.h>
#include<omp.h>
#define N 1000
/*
 * Fills an array with N random values in [0, 100), logs them to A.txt,
 * then counts and times the occurrences of one randomly sampled value.
 * Fixes over the original: the log file is now closed; the counting loop
 * uses its own index j instead of clobbering the outer index i; and the
 * bare "#pragma omp for" (which was outside any parallel region and so
 * ran serially despite omp_set_num_threads(4)) is now a parallel for
 * with a reduction, avoiding a data race on count.
 */
int main (int argc, char *argv[])
{
	double start, end;
	float array[N], num;
	int i, j, count, randindex;
	omp_set_num_threads(4);
	FILE *fptr;
	fptr = fopen("A.txt", "w");
	if (!fptr) {
		perror("fopen");
		return 1;
	}
	for (i = 0; i < N; i++) {
		array[i] = rand() % 100;
		fprintf(fptr, "%f ", array[i]);
	}
	fclose(fptr); /* was never closed: leaked the handle, risked unflushed data */
	/* Sample values to search for; only b[0] is filled and used below. */
	float b[10];
	for (i = 1; i < 2; i++) {
		randindex = (rand() + i) % 100;
		b[i - 1] = array[randindex];
	}
	start = omp_get_wtime();
	for (i = 0; i < 1; i++) {
		count = 0;
		num = b[i];
#pragma omp parallel for reduction(+:count)
		for (j = 0; j < N; j++)
		{
			if (array[j] == num)
				count++;
		}
		printf("Occurrence of %g is: %d\n", num, count);
	}
	end = omp_get_wtime() - start;
	printf("Time = %.6g\n", end);
	return 0;
}
|
DRB007-indirectaccess3-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have distance of 12 (p1 - p2 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
An index set has two indices with distance of 12 :
indexSet[3]- indexSet[0] = 533 - 521 = 12
So there is loop carried dependence for N=0 and N=3.
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within a same chunk to a same thread. So there is no runtime data races.
N is 180, two iterations with N=0 and N= 3 have loop carried dependences.
For static even scheduling, we must have at least 60 threads (180/60=3 iterations)
so iteration 0 and 3 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
/* Index set driving the indirect array accesses in main(). Note the pair
 * 521 / 533 (distance 12, matching the xa1/xa2 pointer offset), which
 * creates the loop-carried dependence documented in the header comment. */
int indexSet[N] = {
521, 523, 525, 533, 529, 531, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
/*
 * DataRaceBench kernel: xa1 and xa2 alias one allocation with an offset
 * of 12 doubles, and indexSet contains index pairs 12 apart, so the
 * parallel update loop below carries the documented (intentional) data
 * race between iterations 0 and 3. This copy of the benchmark was
 * missing the "parallel for" on that loop (the header explicitly
 * describes its OpenMP scheduling and names the race pair), which made
 * this "-yes" benchmark race-free; the pragma is restored here.
 * Do NOT serialize the loop — the race is the point of the benchmark.
 */
int main (int argc, char* argv[])
{
	/* single shared allocation; xa2 overlaps xa1 shifted by 12 doubles */
	double * base = (double*) malloc(sizeof(double)* (2013+12+1));
	if (base == 0)
	{
		printf ("Error in malloc(). Aborting ...\n");
		return 1;
	}
	double * xa1 = base;
	double * xa2 = xa1 + 12;
	int i;
	// initialize segments touched by indexSet
#pragma omp parallel for private(i)
	for (i =521; i<= 2025; ++i)
	{
		base[i]=0.5*i;
	}
	/* intentional race: xa1[idx] vs xa2[idx] for idx pairs 12 apart */
#pragma omp parallel for
	for (i =0; i< N; ++i)
	{
		int idx = indexSet[i];
		xa1[idx]+= 1.0;
		xa2[idx]+= 3.0;
	}
	printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
	free (base);
	return 0;
}
|
sparselu.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
/***********************************************************************
* checkmat:
**********************************************************************/
/* Compares the bots_arg_size_1 x bots_arg_size_1 blocks M and N
 * entrywise; returns TRUE when every element matches to within relative
 * error EPSILON, otherwise prints a diagnostic and returns FALSE. */
int checkmat (float *M, float *N)
{
	int i, j;
	for (i = 0; i < bots_arg_size_1; i++)
	{
		for (j = 0; j < bots_arg_size_1; j++)
		{
			const int idx = i * bots_arg_size_1 + j;
			float r_err = M[idx] - N[idx];
			if (r_err == 0.0)
				continue;
			if (r_err < 0.0)
				r_err = -r_err;
			/* a zero reference entry makes the relative error undefined */
			if (M[idx] == 0)
			{
				bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n",
					i, j, M[idx], i, j, N[idx]);
				return FALSE;
			}
			r_err = r_err / M[idx];
			if (r_err > EPSILON)
			{
				bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
					i, j, M[idx], i, j, N[idx], r_err);
				return FALSE;
			}
		}
	}
	return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/*
 * Generates the blocked sparse input matrix. M is a bots_arg_size x
 * bots_arg_size array of block pointers; each block is either NULL (a
 * structurally zero block) or an allocated bots_arg_size_1 x
 * bots_arg_size_1 dense block filled from a deterministic
 * linear-congruential sequence (seed 1325), so inputs are reproducible
 * across runs. Exits with code 101 on allocation failure.
 */
void genmat (float *M[])
{
	int null_entry, init_val, i, j, ii, jj;
	float *p;
	init_val = 1325;
	/* generating the structure */
	for (ii=0; ii < bots_arg_size; ii++)
	{
		for (jj=0; jj < bots_arg_size; jj++)
		{
			/* computing null entries */
			null_entry=FALSE;
			if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
			if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
			if (ii%2==1) null_entry = TRUE;
			if (jj%2==1) null_entry = TRUE;
			/* the three block diagonals are always non-null */
			if (ii==jj) null_entry = FALSE;
			if (ii==jj-1) null_entry = FALSE;
			if (ii-1 == jj) null_entry = FALSE;
			/* allocating matrix */
			if (null_entry == FALSE){
				M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
				if ((M[ii*bots_arg_size+jj] == NULL))
				{
					bots_message("Error: Out of memory\n");
					exit(101);
				}
				/* initializing matrix */
				p = M[ii*bots_arg_size+jj];
				for (i = 0; i < bots_arg_size_1; i++)
				{
					for (j = 0; j < bots_arg_size_1; j++)
					{
						/* LCG step; values land in roughly [-2, 2) */
						init_val = (3125 * init_val) % 65536;
						(*p) = (float)((init_val - 32768.0) / 16384.0);
						p++;
					}
				}
			}
			else
			{
				M[ii*bots_arg_size+jj] = NULL;
			}
		}
	}
}
/***********************************************************************
* print_structure:
**********************************************************************/
/* Prints an ASCII map of the block structure of matrix M: 'x' marks an
 * allocated (non-NULL) block, a blank marks a structurally zero block. */
void print_structure(char *name, float *M[])
{
	int ii, jj;
	bots_message("Structure for matrix %s @ 0x%p\n", name, M);
	for (ii = 0; ii < bots_arg_size; ii++) {
		for (jj = 0; jj < bots_arg_size; jj++) {
			bots_message(M[ii * bots_arg_size + jj] != NULL ? "x" : " ");
		}
		bots_message("\n");
	}
	bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
/* Allocate one dense bots_arg_size_1 x bots_arg_size_1 block initialized to
 * zero. Exits the program on allocation failure (benchmark convention).
 * Returns a pointer owned by the caller (freed implicitly at program exit). */
float * allocate_clean_block()
{
   /* calloc zero-fills the storage; all-bits-zero is 0.0f for IEEE-754
      floats, matching the explicit zeroing loop previously used here,
      and lets the allocator provide pre-zeroed pages. */
   float *p = (float *) calloc((size_t)bots_arg_size_1 * bots_arg_size_1, sizeof(float));
   if (p == NULL)
   {
      bots_message("Error: Out of memory\n");
      exit (101);
   }
   return p;
}
/***********************************************************************
* lu0:
**********************************************************************/
/* In-place unblocked LU factorization (no pivoting) of one dense
 * diagonal block stored row-major. Multipliers overwrite the strict
 * lower triangle; U overwrites the upper triangle. */
void lu0(float *diag)
{
   int row, col, piv;
   const int n = bots_arg_size_1;
   for (piv = 0; piv < n; piv++)
   {
      for (row = piv + 1; row < n; row++)
      {
         /* store the elimination multiplier in L */
         diag[row*n + piv] /= diag[piv*n + piv];
         /* update the trailing part of this row */
         for (col = piv + 1; col < n; col++)
            diag[row*n + col] -= diag[row*n + piv] * diag[piv*n + col];
      }
   }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/* Block division: solve row := row * U^{-1} in place, where U is the
 * upper triangle of the already-factored diagonal block. Applied to
 * blocks in the same block-column as the pivot. */
void bdiv(float *diag, float *row)
{
   int r, c, k;
   const int n = bots_arg_size_1;
   for (r = 0; r < n; r++)
   {
      for (k = 0; k < n; k++)
      {
         row[r*n + k] /= diag[k*n + k];
         for (c = k + 1; c < n; c++)
            row[r*n + c] -= row[r*n + k] * diag[k*n + c];
      }
   }
}
/***********************************************************************
* bmod:
**********************************************************************/
/* Block modification (Schur-complement update):
 * inner -= row * col, a dense matrix multiply-subtract on one block. */
void bmod(float *row, float *col, float *inner)
{
   int r, c, k;
   const int n = bots_arg_size_1;
   for (r = 0; r < n; r++)
      for (c = 0; c < n; c++)
         for (k = 0; k < n; k++)
            inner[r*n + c] -= row[r*n + k] * col[k*n + c];
}
/***********************************************************************
* fwd:
**********************************************************************/
/* Forward elimination: apply the unit lower triangle of the factored
 * diagonal block to a block in the same block-row as the pivot. */
void fwd(float *diag, float *col)
{
   int r, c, k;
   const int n = bots_arg_size_1;
   for (c = 0; c < n; c++)
      for (k = 0; k < n; k++)
         for (r = k + 1; r < n; r++)
            col[r*n + c] -= diag[r*n + k] * col[k*n + c];
}
void sparselu_init (float ***pBENCH, char *pass)
{
*pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
genmat(*pBENCH);
print_structure(pass, *pBENCH);
}
/* Task-parallel blocked sparse LU factorization of BENCH (in place).
 * One thread (single) runs the outer pivot loop inside a task; for each
 * pivot kk it factors the diagonal block, then spawns independent tasks
 * for the pivot row (fwd) and pivot column (bdiv), waits, then spawns
 * tasks for the trailing-submatrix updates (bmod) and waits again.
 * kk/ii/jj are captured firstprivate so each task sees its own indices. */
void sparselu_par_call(float **BENCH)
{
int ii, jj, kk;
bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel
#pragma omp single
#pragma omp task
for (kk=0; kk<bots_arg_size; kk++)
{
/* factor the pivot block sequentially within the driver task */
lu0(BENCH[kk*bots_arg_size+kk]);
/* pivot row updates: independent of each other */
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task firstprivate(kk, jj) shared(BENCH)
{
fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
}
/* pivot column updates: independent of each other */
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task firstprivate(kk, ii) shared(BENCH)
{
bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
}
/* all fwd/bdiv results are inputs to the bmod tasks below */
#pragma omp taskwait
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH)
{
/* fill-in: materialize a zero block before updating it.
   Allocation happens in the driver-sequential part of the loop
   nest only when the task is created?  No -- it is inside the
   task body, but each (ii,jj) pair is touched by exactly one
   task per kk iteration, so there is no racing allocation
   within one pivot step. */
if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
/* the trailing submatrix must be fully updated before the next pivot */
#pragma omp taskwait
}
bots_message(" completed!\n");
}
/* Sequential reference implementation of the blocked sparse LU
 * factorization; performs the same block operations as
 * sparselu_par_call, in the same order, without tasking. */
void sparselu_seq_call(float **BENCH)
{
   int row, col, piv;
   for (piv = 0; piv < bots_arg_size; piv++)
   {
      /* factor the diagonal pivot block */
      lu0(BENCH[piv*bots_arg_size+piv]);

      /* eliminate along the pivot block-row */
      for (col = piv + 1; col < bots_arg_size; col++)
         if (BENCH[piv*bots_arg_size+col] != NULL)
            fwd(BENCH[piv*bots_arg_size+piv], BENCH[piv*bots_arg_size+col]);

      /* eliminate along the pivot block-column */
      for (row = piv + 1; row < bots_arg_size; row++)
         if (BENCH[row*bots_arg_size+piv] != NULL)
            bdiv (BENCH[piv*bots_arg_size+piv], BENCH[row*bots_arg_size+piv]);

      /* Schur-complement update of the trailing submatrix,
         materializing fill-in blocks on demand */
      for (row = piv + 1; row < bots_arg_size; row++)
         if (BENCH[row*bots_arg_size+piv] != NULL)
            for (col = piv + 1; col < bots_arg_size; col++)
               if (BENCH[piv*bots_arg_size+col] != NULL)
               {
                  if (BENCH[row*bots_arg_size+col] == NULL)
                     BENCH[row*bots_arg_size+col] = allocate_clean_block();
                  bmod(BENCH[row*bots_arg_size+piv], BENCH[piv*bots_arg_size+col], BENCH[row*bots_arg_size+col]);
               }
   }
}
/* Benchmark finalization hook: print the (post-factorization) block
 * structure of BENCH under the given label. Does not free the blocks. */
void sparselu_fini (float **BENCH, char *pass)
{
print_structure(pass, BENCH);
}
/* Compare sequential (SEQ) and parallel (BENCH) factorizations block by
 * block: both the sparsity pattern and the block contents must agree.
 * Stops at the first mismatch. Returns BOTS_RESULT_SUCCESSFUL or
 * BOTS_RESULT_UNSUCCESSFUL. */
int sparselu_check(float **SEQ, float **BENCH)
{
   int row, col;
   int ok = 1;
   for (row = 0; row < bots_arg_size && ok; row++)
   {
      for (col = 0; col < bots_arg_size && ok; col++)
      {
         float *a = SEQ[row*bots_arg_size+col];
         float *b = BENCH[row*bots_arg_size+col];
         if ((a == NULL) != (b == NULL))
            ok = FALSE;           /* sparsity patterns disagree */
         else if (a != NULL)
            ok = checkmat(a, b);  /* both present: compare contents */
      }
   }
   if (ok) return BOTS_RESULT_SUCCESSFUL;
   else return BOTS_RESULT_UNSUCCESSFUL;
}
|
THTensorMath.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#ifndef NAN
#define NAN (nan(NULL))
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* Minimum element count before an elementwise op is worth parallelizing. */
#define TH_OMP_OVERHEAD_THRESHOLD 100000
#ifdef _OPENMP
/* MSVC has no _Pragma; it spells the operator __pragma. */
#ifndef _WIN32
#define PRAGMA(P) _Pragma(#P)
#else
#define PRAGMA(P) __pragma(P)
#endif
/* Apply CODE to a contiguous tensor. The OpenMP variant splits the flat
 * range into one near-equal chunk per thread (last thread takes the
 * remainder) and exposes TENSOR_data / TENSOR_len for that chunk.
 * Parallelism is skipped for small tensors or when already inside a
 * parallel region. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
int inOmp = omp_in_parallel(); \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
CODE \
} \
}
#else
/* Serial fallback: the whole tensor is one chunk. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
CODE \
}
#endif
#ifdef _OPENMP
/* Two-tensor variant: chunking is driven by TENSOR1's element count;
 * both tensors are assumed contiguous and equal in size. */
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
int inOmp = omp_in_parallel(); \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
CODE \
} \
}
#else
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
CODE \
}
#endif
#ifdef _OPENMP
/* Three-tensor variant; same chunking scheme as above. */
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
int inOmp = omp_in_parallel(); \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
CODE \
} \
}
#else
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
CODE \
}
#endif
/* Raise a descriptive error when two tensors differ in size. */
#define TH_CHECK_SAME_SIZE(TENSOR1, TENSOR2) \
{ \
if(!THTensor_(isSameSizeAs)(TENSOR1, TENSOR2)) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THError("inconsistent tensor size, expected %s %s and %s %s to have the same size", \
#TENSOR1, T1buff.str, #TENSOR2, T2buff.str); \
} \
}
// Used for `scatter` and `scatterAdd`
// Assumes TENSOR1 is real
// TENSOR2 is src
// TENSOR3 is index
// Tests:
// 1. index->size[d] <= src->size[d] for all d
// 2. index->size[d] <= real->size[d] for all d != dim
#define TH_TENSOR_DIM_APPLY3_SIZE_SCATTER(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \
{ \
int shape_check_flag = 0; \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
{ \
int64_t TENSOR3##_dim_size = TENSOR3->size[TH_TENSOR_DIM_APPLY_i]; \
if (TH_TENSOR_DIM_APPLY_i != DIMENSION) { \
if (TENSOR3##_dim_size > TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) { \
shape_check_flag = 1; \
break; \
} \
} \
if (TENSOR3##_dim_size > TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \
shape_check_flag = 1; \
break; \
} \
} \
if (shape_check_flag == 1) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
THError("Expected %s %s to be smaller size than %s %s and to be smaller than %s %s apart from dimension %d", \
#TENSOR3, T3buff.str, #TENSOR2, T2buff.str, #TENSOR1, T1buff.str, DIMENSION); \
} \
}
/* Scalar power x^y. Floating-point types defer to powf/pow; integral
 * types use exponentiation by squaring and reject negative exponents. */
static inline real THTensor_(powOne)(real x, real y) {
#if defined(TH_REAL_IS_FLOAT)
return powf(x, y);
#elif defined(TH_REAL_IS_DOUBLE)
return pow(x, y);
#else
THArgCheck(y >= 0, 1,
"Integers to negative integer powers are not allowed");
real result = 1;
/* binary exponentiation: O(log y) multiplications */
while (y) {
if (y & 1) {
result *= x;
}
y /= 2;
x *= x;
}
return result;
#endif
}
/* Set every element of r_ to value. Contiguous (or fully transposed)
 * tensors take the vectorized bulk path; otherwise stride-1 inner runs
 * are filled with THVector and the rest element by element. */
void THTensor_(fill)(THTensor *r_, real value)
{
if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
} else {
TH_TENSOR_APPLY(real, r_,
if (r__stride == 1) {
/* whole innermost run is contiguous: fill it in one call,
   then advance the apply-macro cursor past the run */
THVector_(fill)(r__data, value, r__size);
r__i = r__size;
r__data += r__stride * r__size;
break;
} else {
*r__data = value;
}
);
}
}
/* Zero out r_: filling with the additive identity of the element type. */
void THTensor_(zero)(THTensor *r_)
{
  const real zeroValue = 0;
  THTensor_(fill)(r_, zeroValue);
}
/* Set tensor elements to value wherever mask is 1. mask must hold only
 * 0/1; on a bad mask value the apply-macro counters are freed manually
 * before THError aborts out of the loop. */
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THFree(mask_counter);
THFree(tensor_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = value;
});
}
/* Copy elements from src (consumed linearly) into tensor at positions
 * where mask is 1. Requires nElement(tensor) == nElement(mask) and at
 * least as many src elements as ones in mask. All error paths free the
 * contiguous src copy and the apply-macro counters before THError. */
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
THTensor *srct = THTensor_(newContiguous)(src);
real *src_data = THTensor_(data)(srct);
ptrdiff_t cntr = 0;
ptrdiff_t nelem = THTensor_(nElement)(srct);
if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
{
THTensor_(free)(srct);
THError("Number of elements of destination tensor != Number of elements in mask");
}
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THTensor_(free)(srct);
THFree(mask_counter);
THFree(tensor_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
/* cntr counts src elements consumed so far */
if (cntr == nelem)
{
THTensor_(free)(srct);
THFree(mask_counter);
THFree(tensor_counter);
THError("Number of elements of src < number of ones in mask");
}
*tensor_data = *src_data;
src_data++;
cntr++;
});
THTensor_(free)(srct);
}
/* Gather elements of src where mask is 1 into tensor, resized to a 1-D
 * tensor of length sum(mask). mask must hold only 0/1. */
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
ptrdiff_t numel = THByteTensor_sumall(mask);
real *tensor_data;
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
THTensor_(resize1d)(tensor,numel);
tensor_data = THTensor_(data)(tensor);
TH_TENSOR_APPLY2(real, src, unsigned char, mask,
if (*mask_data > 1)
{
THFree(mask_counter);
THFree(src_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
/* output is written densely in src traversal order */
*tensor_data = *src_data;
tensor_data++;
});
}
// Finds non-zero elements of a tensor and returns their subscripts
/* Two passes: first count nonzeros to size the (numel x nDimension)
 * subscript tensor, then decode each flat position i into per-dimension
 * coordinates. Half floats compare only the non-sign bits so -0.0 counts
 * as zero. */
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
ptrdiff_t numel = 0;
int64_t *subscript_data;
int64_t i = 0;
int64_t dim;
int64_t div = 1;
#ifdef TH_REAL_IS_HALF
#define IS_NONZERO(val) ((val.x & 0x7fff) != 0)
#else
#define IS_NONZERO(val) ((val)!=0)
#endif
/* First Pass to determine size of subscripts */
TH_TENSOR_APPLY(real, tensor,
if IS_NONZERO(*tensor_data) {
++numel;
});
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
THLongTensor_resize2d(subscript, numel, tensor->nDimension);
/* Second pass populates subscripts */
subscript_data = THLongTensor_data(subscript);
TH_TENSOR_APPLY(real, tensor,
if IS_NONZERO(*tensor_data) {
/* decode flat index i into coordinates, innermost dim first */
div = 1;
for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
*(subscript_data + dim) = (i/div) % tensor->size[dim];
div *= tensor->size[dim];
}
subscript_data += tensor->nDimension;
}
++i;);
}
/* tensor = src indexed along dim by index (a 1-D long tensor).
 * tensor is resized to src's shape with size[dim] = nElement(index).
 * Fast path: dim == 0 on contiguous tensors copies whole rows (memcpy),
 * parallelized with OpenMP; otherwise falls back to per-element or
 * per-slice copies. Indices are TH_INDEX_BASE-based. */
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
ptrdiff_t i, numel;
THLongStorage *newSize;
THTensor *tSlice, *sSlice;
int64_t *index_data;
real *tensor_data, *src_data;
THArgCheck(index->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(src->nDimension > 0, 2, "Source tensor is empty");
numel = THLongTensor_nElement(index);
newSize = THLongStorage_newWithSize(src->nDimension);
THLongStorage_rawCopy(newSize,src->size);
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
newSize->data[dim] = numel;
THTensor_(resize)(tensor,newSize,NULL);
THLongStorage_free(newSize);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
{
tensor_data = THTensor_(data)(tensor);
src_data = THTensor_(data)(src);
ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0];
// check that the indices are within range
int64_t max = src->size[0] - 1 + TH_INDEX_BASE;
for (i=0; i<numel; i++) {
if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) {
THLongTensor_free(index);
THError("index out of range");
}
}
if (src->nDimension == 1) {
/* 1-D fast path: elementwise gather */
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE];
} else {
/* N-D fast path: copy one whole row per index */
#pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real));
}
}
else if (src->nDimension == 1)
{
for (i=0; i<numel; i++)
THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE));
}
else
{
/* general path: select matching slices and copy them */
for (i=0; i<numel; i++)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, i);
THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
}
THLongTensor_free(index);
}
/* Copy slice i of src into slice index[i] of tensor along dim
 * (inverse of indexSelect). Indices are TH_INDEX_BASE-based.
 * Argument validation is performed by the ATen caller. */
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
ptrdiff_t i, numel;
THTensor *tSlice, *sSlice;
int64_t *index_data;
// Error checking for this function has moved to ATen!!
numel = THLongTensor_nElement(index);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1 )
{
/* reuse one pair of slice views across all iterations */
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(copy)(tSlice, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i));
}
}
THLongTensor_free(index);
}
/* Map a linear element index to a storage offset for a (possibly
 * non-contiguous) tensor by peeling one coordinate per dimension,
 * innermost dimension first. */
static ptrdiff_t THTensor_(dataOffset)(THTensor* tensor, ptrdiff_t linearIndex) {
  int64_t *sizes = tensor->size;
  int64_t *strides = tensor->stride;
  ptrdiff_t offset = 0;
  for (int d = tensor->nDimension - 1; d >= 0; d--) {
    ptrdiff_t coord = linearIndex % sizes[d];
    offset += coord * strides[d];
    linearIndex /= sizes[d];
  }
  return offset;
}
/* Reject linear indices outside [-numel, numel); negatives wrap from the end. */
static void THTensor_(checkLinearIndex)(int64_t linearIndex, int64_t numel) {
  int inRange = (linearIndex < numel) && (linearIndex >= -numel);
  THArgCheck(inRange, 2, "out of range: %d out of %d", (int)linearIndex, (int)numel);
}
/* Normalize a possibly-negative linear index: negative values count
 * back from the end, as in Lua/Python indexing. */
static int64_t THTensor_(wrapLinearIndex)(int64_t linearIndex, int64_t numel) {
  if (linearIndex < 0) {
    return linearIndex + numel;
  }
  return linearIndex;
}
/* r_ = src viewed as flat, gathered at the linear indices in index;
 * r_ takes index's shape. Indices may be negative (wrap from the end).
 * The gather loop is OpenMP-parallel, so out-of-range indices are
 * recorded via atomic CAS and reported after the loop. */
void THTensor_(take)(THTensor *r_, THTensor *src, THLongTensor *index)
{
THTensor_(resizeNd)(r_, index->nDimension, index->size, NULL);
THTensor* dst = THTensor_(newContiguous)(r_);
index = THLongTensor_newContiguous(index);
int64_t* index_data = THLongTensor_data(index);
ptrdiff_t srcElements = THTensor_(nElement)(src);
real* src_data = THTensor_(data)(src);
real* dst_data = THTensor_(data)(dst);
ptrdiff_t nIndices = THLongTensor_nElement(index);
int isContiguous = THTensor_(isContiguous)(src);
// Exceptions must not be thrown across OpenMP parallel sections, so we
// record the value of the invalid index and throw the exception after the
// loop.
int64_t invalidIdx = -1;
ptrdiff_t i;
#pragma omp parallel for if(nIndices > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i = 0; i < nIndices; i++) {
int64_t idx = index_data[i];
if (idx < srcElements && idx >= -srcElements) {
idx = THTensor_(wrapLinearIndex)(idx, srcElements);
if (isContiguous) {
dst_data[i] = src_data[idx];
} else {
/* non-contiguous src: translate the flat index to a storage offset */
dst_data[i] = src_data[THTensor_(dataOffset)(src, idx)];
}
} else {
/* keep only the first invalid index observed */
THAtomicCompareAndSwapLong(&invalidIdx, -1, idx);
}
}
if (invalidIdx >= 0) {
THTensor_(checkLinearIndex)(invalidIdx, srcElements);
}
THLongTensor_free(index);
THTensor_(freeCopyTo)(dst, r_);
}
/* Scatter src into tensor (viewed as flat) at the linear indices in
 * index; inverse of take. accumulate != 0 adds instead of overwriting.
 * Negative indices wrap from the end; each index is range-checked. */
void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int accumulate)
{
THArgCheck(THLongTensor_nElement(index) == THTensor_(nElement)(src), 3,
"src should have the same number of elements as index");
index = THLongTensor_newContiguous(index);
src = THTensor_(newContiguous)(src);
real* data = THTensor_(data)(tensor);
ptrdiff_t numel = THTensor_(nElement)(tensor);
int is_contiguous = THTensor_(isContiguous)(tensor);
TH_TENSOR_APPLY2(int64_t, index, real, src,
THTensor_(checkLinearIndex)(*index_data, numel);
int64_t linearIndex = THTensor_(wrapLinearIndex)(*index_data, numel);
int64_t dataOffset = is_contiguous ? linearIndex : THTensor_(dataOffset)(tensor, linearIndex);
if (accumulate) {
data[dataOffset] += *src_data;
} else {
data[dataOffset] = *src_data;
}
);
THTensor_(free)(src);
THLongTensor_free(index);
}
/* Accumulate slice i of src into slice index[i] of tensor along dim:
 * tensor[..., index[i], ...] += src[..., i, ...].
 * index must be 1-D with nElement == src->size[dim]. Duplicate indices
 * accumulate repeatedly (loop is sequential). */
void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
ptrdiff_t i, numel;
THTensor *tSlice, *sSlice;
int64_t *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1)
{
/* reuse one pair of slice views across all iterations */
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor,
index_data[i] - TH_INDEX_BASE,
THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
}
}
THLongTensor_free(index);
}
/* Fill slice index[i] of tensor along dim with val, for every i.
 * index must be a 1-D long tensor; indices are TH_INDEX_BASE-based. */
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  ptrdiff_t i, numel;
  THTensor *tSlice;
  int64_t *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (tensor->nDimension > 1)
  {
    /* Allocate the slice view once and re-select it per index, matching
       indexAdd/indexCopy, instead of a THTensor_(new)/free per iteration. */
    tSlice = THTensor_(new)();
    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(fill)(tSlice, val);
    }
    THTensor_(free)(tSlice);
  }
  else
  {
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
    }
  }
  THLongTensor_free(index);
}
/* tensor[...][i][...] = src[...][index[...][i][...]][...] along dim:
 * for each position, pick the src element whose dim-coordinate is given
 * by index. tensor, src and index must have the same dimensionality;
 * each index value must lie in [TH_INDEX_BASE, src_size + TH_INDEX_BASE). */
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
int64_t elems_per_row, i, idx;
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(dim >= 0 && dim < THTensor_(nDimension)(tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
{
/* free the macro's bookkeeping before aborting */
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in gather");
}
*(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
})
}
/* Inverse of gather: write src elements into tensor at the dim-coordinate
 * given by index. With duplicate indices the last write wins (iteration
 * order). index sizes are validated by TH_TENSOR_DIM_APPLY3_SIZE_SCATTER. */
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
int64_t elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_SCATTER,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatter");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
})
}
/* Like scatter, but accumulates: tensor[idx] += src. Duplicate indices
 * add repeatedly (loop is sequential, so accumulation is deterministic). */
void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
int64_t elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_SCATTER,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatterAdd");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride);
})
}
/* Like scatter with a scalar source: set tensor[idx] = val at every
 * dim-coordinate listed in index. Duplicate indices are harmless since
 * all writes store the same value. */
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  int64_t elems_per_row, i, idx;
  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim,
                       for (i = 0; i < elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           /* Fix: the message previously said "scatter",
                              misattributing the failing operation. */
                           THError("Invalid index in scatterFill");
                         }
                         tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
                       })
}
/* Dot product of two tensors viewed as flat vectors, accumulated in
 * accreal. The apply-macro body runs once per common stride-run: it
 * hands the whole run to THBlas_(dot), then advances both cursors past
 * it and breaks out of the innermost macro loop. */
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* we use a trick here. careful with that. */
TH_TENSOR_APPLY2(real, tensor, real, src,
int64_t sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
/* th_isnan(val): true when val is NaN; integral types can never be NaN,
 * so the predicate collapses to 0 for them. */
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
(std::isnan(val))
#else
#define th_isnan(val) (0)
#endif
/* th_isnan_break(val): early-exit a TH_TENSOR_APPLY loop once a NaN has
 * been seen (NaN propagates, so further scanning is pointless). */
#undef th_isnan_break
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan_break(val) \
if (std::isnan(val)) break;
#else
#define th_isnan_break(val)
#endif
/* Minimum over all elements. NaN-aware: !(value >= theMin) is true for
 * NaN, so a NaN replaces the current minimum and the scan stops early,
 * propagating NaN as the result. */
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theMin = value;
th_isnan_break(value)
});
return theMin;
}
/* Maximum over all elements; mirror image of minall, with the same
 * NaN-propagating comparison and early exit. */
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theMax = value;
th_isnan_break(value)
});
return theMax;
}
static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride);
/* Median over all elements (the lower median when the count is even).
 * Operates on a contiguous clone so the input is left untouched. */
real THTensor_(medianall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
ptrdiff_t count = THTensor_(nElement)(tensor);
int64_t rank = (count - 1) >> 1;  /* 0-based rank of the lower median */
THTensor *scratch = THTensor_(newClone)(tensor);
real *scratch_data = THTensor_(data)(scratch);
/* partially order the clone so position `rank` holds the k-th smallest */
THTensor_(quickselectnoidx)(scratch_data, rank, count, 1);
real median = scratch_data[rank];
THTensor_(free)(scratch);
return median;
}
/* Sum over all elements, accumulated in accreal. Uses the OpenMP
 * reduction apply-macro when available and not already nested inside a
 * parallel region; otherwise falls back to the serial apply loop. */
accreal THTensor_(sumall)(THTensor *tensor)
{
accreal sum = 0;
int serial_path = 0;
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if(inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, +:sum, sum += *tensor_data;);
}
#else
serial_path = 1;
#endif
if (serial_path) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
}
return sum;
}
/* Product over all elements, accumulated in accreal; same OpenMP
 * reduction / serial fallback structure as sumall, with a `*` reduction. */
accreal THTensor_(prodall)(THTensor *tensor)
{
accreal prod = 1;
int serial_path = 0;
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if(inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, *:prod, prod *= *tensor_data;);
}
#else
serial_path = 1;
#endif
if (serial_path) {
TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
}
return prod;
}
/* r_ = t + value (scalar). Three-tier dispatch used throughout this
 * file: vectorized path for contiguous tensors, OpenMP elementwise path
 * for strided tensors when not nested in a parallel region, and a
 * serial apply loop otherwise. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data + value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
/* r_ = t - value: subtracting a scalar is adding its negation. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  const real negated = -value;
  THTensor_(add)(r_, t, negated);
}
/* r_ = t + value*alpha: a scaled scalar add is a plain scalar add of the product. */
void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  const real scaled = value * alpha;
  THTensor_(add)(r_, t, scaled);
}
/* r_ = t - value*alpha: delegate to add with the negated product. */
void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  const real scaled = -value * alpha;
  THTensor_(add)(r_, t, scaled);
}
/* r_ = t * value (scalar); same contiguous/OpenMP/serial dispatch as add. */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data * value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
/* r_ = t / value (scalar); same contiguous/OpenMP/serial dispatch as add.
 * No zero-divisor check: behavior follows the element type's division. */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data / value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
/* r_ = t << value. Floating types emulate the shift as multiplication by
 * 2^value; half is unsupported; integer types shift the unsigned
 * representation (ureal) except byte, which is already unsigned. */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("lshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
/* cheap per-element op: raise the parallelization threshold */
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) << value;
#else
rp[i] = ((ureal) tp[i]) << value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) << value););
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) << value););
#endif
}
#endif
}
/* r_ = t >> value. Floating types emulate the shift as division by
 * 2^value; half is unsupported; integer types shift the unsigned
 * representation (ureal, i.e. logical shift) except byte. */
void THTensor_(rshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(div)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(div)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("rshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
/* cheap per-element op: raise the parallelization threshold */
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) >> value;
#else
rp[i] = ((ureal) tp[i]) >> value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) >> value););
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value););
#endif
}
#endif
}
/* r_ = fmod(t, value), elementwise (C truncated remainder: result takes the
 * sign of t, not of value; cf. THTensor_(remainder) for floored modulo).
 * Floating types use libm fmod; integer types use %, so value == 0 is a
 * division by zero for integers — callers are expected to guard against it.
 * Same dispatch structure as the other scalar ops: contiguous OpenMP fast
 * path, TH_TENSOR_APPLY2_OMP for strided tensors, serial fallback when
 * nested in a parallel region or OpenMP is unavailable. */
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = fmod(tp[i], value);
#else
      rp[i] = tp[i] % value;
#endif
    }
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = fmod(*t_data, value););
#else
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data % value););
#endif
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value););
#endif
  }
}
/* True when a and b have opposite signs (zero counts as non-negative).
 * Used by the remainder ops to decide whether the truncated % result must
 * be shifted to implement floored modulo, without forming the product
 * a * b (which could overflow for signed integer types). */
static inline bool has_different_sign(real a, real b) {
  const bool a_negative = (a < 0);
  const bool b_negative = (b < 0);
  return a_negative != b_negative;
}
/* r_ = t mod value, elementwise, with *floored* semantics: the result has
 * the sign of value (Python-style %), unlike THTensor_(fmod) which
 * truncates toward zero. Floating types compute t - value*floor(t/value)
 * and return NAN when value == 0. Integer types compute % and then add
 * value back when the truncated remainder disagrees in sign with value;
 * has_different_sign avoids the overflow-prone product test. value == 0
 * for integers is a division by zero (not guarded here). */
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
#else
      // There is no NAN for integers
      rp[i] = tp[i] % value;
      /* fix up sign so the result matches the divisor (floored modulo) */
      if (has_different_sign(rp[i], value))
        rp[i] += value;
#endif
    }
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
      // There is no NAN for integers
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data % value;
                                        if (has_different_sign(*r__data, value)) *r__data += value;);
#endif
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value;
                                      if (has_different_sign(*r__data, value)) *r__data += value;);
#endif
  }
}
/* r_ = t & value, elementwise. Integer-only: float/double/half error out
 * (the (void) casts silence unused-parameter warnings on that path).
 * Contiguous tensors use the OpenMP fast path (threshold * 100 since the
 * body is a single AND); strided tensors go through TH_TENSOR_APPLY2_OMP,
 * or the serial macro when OpenMP is unavailable / already parallel. */
void THTensor_(bitand)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)value;
  return THError("bitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int serial_path = 0;
  int tContig = THTensor_(isContiguous)(t);
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<r_Size; i++) {
      rp[i] = tp[i] & value;
    }
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data & value;);
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;);
  }
#endif
}
/* r_ = t | value, elementwise. Integer-only; see THTensor_(bitand) for the
 * shared dispatch pattern (contiguous OpenMP fast path, strided OMP macro,
 * serial fallback). */
void THTensor_(bitor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)value;
  return THError("bitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<r_Size; i++) {
      rp[i] = tp[i] | value;
    }
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data | value;);
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;);
  }
#endif
}
/* r_ = t ^ value, elementwise. Integer-only; see THTensor_(bitand) for the
 * shared dispatch pattern. */
void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)value;
  return THError("bitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<r_Size; i++) {
      rp[i] = tp[i] ^ value;
    }
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data ^ value;);
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
  }
#endif
}
/* r_ = clamp(t, min_value, max_value), elementwise.
 * NOTE(review): assumes min_value <= max_value — the bounds are not
 * validated here; presumably callers check. Same contiguous-fast-path /
 * strided-macro / serial-fallback dispatch as the other scalar ops. */
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    /* real t_val; */
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<r_Size; i++)
      rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
  } else {
#ifdef _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
  }
}
/* r_ = t + value * src (axpy-style). When everything is contiguous and
 * r_ aliases t, the whole op is a single BLAS axpy; otherwise the
 * contiguous case uses the vectorized THVector_(cadd) kernel. Tensors with
 * mismatched element counts (broadcasting is not done here) or strided
 * layouts fall through to the APPLY3 macros, serial when nested/no OpenMP. */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      if(r_ == t) {
        /* in-place: r_ += value * src via BLAS */
        THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
      } else {
        TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}
/* r_ = t - value * src, implemented by delegating to cadd with the scale
 * negated. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(cadd)(r_, t, -value, src);
}
/* r_ = t * src, elementwise (Hadamard product). Contiguous tensors use the
 * vectorized THVector_(cmul) kernel; same strided/serial fallback scheme as
 * cadd. Element counts must match — no broadcasting at this level. */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len););
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
  }
}
/* r_ = t ^ value (elementwise power), with special-cased fast paths:
 * 1 -> copy, 2 -> cmul, 3 -> inline cube (all types); for float/double
 * additionally 0.5 -> sqrt, -0.5 -> rsqrt, -1 -> cinv, -2 -> 1/(x*x),
 * and a generic pow()/powf() otherwise. Integer types use the project's
 * THTensor_(powOne) helper for the generic case. TH_MATH_NAME picks the
 * float-suffixed libm entry point for float builds. */
void THTensor_(pow)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if(value == 1){
    THTensor_(copy)(r_, t);
  }
  else if(value == 2){
    THTensor_(cmul)(r_, t, t);
  }
  else if(value == 3){
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * *t_data * *t_data;);
  }
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif
  else if(value == 0.5){
    THTensor_(sqrt)(r_, t);
  }
  else if(value == -0.5){
    THTensor_(rsqrt)(r_, t);
  }
  else if(value == -1){
    THTensor_(cinv)(r_, t);
  }
  else if(value == -2){
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data););
  }
  else{
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(pow)(*t_data, value););
  }
#undef TH_MATH_NAME
#else
  else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(*t_data, value););
  }
#endif
}
/* r_ = t ^ src, elementwise power with a tensor exponent. All element types
 * route through THTensor_(powOne) (no per-exponent fast paths here, unlike
 * the scalar pow above). Standard contiguous/strided/serial dispatch. */
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++)
        rp[i] = THTensor_(powOne)(tp[i], sp[i]);
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data););
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data););
  }
}
/* r_ = t / src, elementwise. Contiguous tensors use the vectorized
 * THVector_(cdiv) kernel. Division by zero is not guarded: integer types
 * hit C division-by-zero, floating types produce inf/NaN per IEEE 754. */
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len););
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
  }
}
/* r_ = t << src, elementwise left shift with a tensor shift count.
 * Half errors out up front (the remaining body is still compiled for half,
 * but the early return makes it unreachable). Float/double emulate the
 * shift as multiplication by 2^src; byte shifts directly; other integer
 * types shift through the unsigned counterpart (ureal) to avoid shifting
 * a signed value. Standard contiguous/strided/serial dispatch. */
void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  return THError("clshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT)
        rp[i] = tp[i] * powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
        rp[i] = tp[i] * pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
        rp[i] = ((real) tp[i]) << sp[i];
#else
        rp[i] = ((ureal) tp[i]) << sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
  }
}
/* r_ = t >> src, elementwise right shift with a tensor shift count.
 * Mirror image of clshift: half rejected up front, float/double divide by
 * 2^src, byte shifts directly, other integer types go through ureal (so
 * this is a logical, zero-filling shift for signed types). */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  return THError("crshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT)
        rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
        rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
        rp[i] = ((real) tp[i]) >> sp[i];
#else
        rp[i] = ((ureal) tp[i]) >> sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
  }
}
/* r_ = fmod(t, src), elementwise truncated remainder with a tensor divisor
 * (result takes the sign of t; cf. cremainder for floored modulo).
 * Floating types use libm fmod; integer types use %, so any zero in src is
 * an integer division by zero — not guarded here. */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        rp[i] = fmod(tp[i], sp[i]);
#else
        rp[i] = tp[i] % sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig,real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
  }
}
/* r_ = t mod src, elementwise *floored* modulo with a tensor divisor: the
 * result takes the sign of the divisor (Python-style %), unlike cfmod
 * which truncates toward zero. Floating types compute
 * t - src*floor(t/src) and yield NAN where src == 0. Integer types
 * compute % and add the divisor back when the truncated remainder
 * disagrees in sign with it; a zero in src is an unguarded integer
 * division by zero.
 *
 * Fix: the sign fix-up now uses has_different_sign(r, s) instead of the
 * old product test (r * s < 0). The product can overflow — undefined
 * behavior for signed integer types and wrong answers for large
 * magnitudes — and the scalar THTensor_(remainder) already uses the
 * sign-comparison form, so this also restores consistency. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
        // There is no NAN for integers
        rp[i] = tp[i] % sp[i];
        /* sign comparison, not rp[i]*sp[i] < 0: the product can overflow */
        if (has_different_sign(rp[i], sp[i]))
          rp[i] += sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data % *src_data;
                                                     if (has_different_sign(*r__data, *src_data)) *r__data += *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data;
                                                 if (has_different_sign(*r__data, *src_data)) *r__data += *src_data;);
#endif
  }
}
/* r_ = t & src, elementwise. Integer-only; float/double/half error out.
 * Standard contiguous/strided/serial dispatch (see bitand). */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] & sp[i];
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
  }
#endif
}
/* r_ = t | src, elementwise. Integer-only; same structure as cbitand. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] | sp[i];
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
  }
#endif
}
/* r_ = t ^ src, elementwise. Integer-only; same structure as cbitand. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] ^ sp[i];
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
  }
#endif
}
/* r_ = value ^ t, elementwise: scalar base raised to a tensor exponent
 * (note the argument order — base first, mirroring the math). All types
 * route through THTensor_(powOne). Standard dispatch. */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<r_Size; i++)
      rp[i] = THTensor_(powOne)(value, tp[i]);
  } else {
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
  }
}
/* r_ = t + value * src1 * src2 (fused multiply-accumulate over three
 * tensors). When r_ != t, t is first copied into r_, and the APPLY macros
 * then accumulate in place (*r__data += ...), so t only participates via
 * that initial copy. All three element counts must match for the OMP path;
 * otherwise the serial APPLY3 iterates with its own dimension handling. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t src1Size = THTensor_(nElement)(src1);
  int64_t src2Size = THTensor_(nElement)(src2);
  int r_Contig = THTensor_(isContiguous)(r_);
  int src1Contig = THTensor_(isContiguous)(src1);
  int src2Contig = THTensor_(isContiguous)(src2);
  int serial_path = 0;
  if( (src1Size == src2Size) && (src1Size == r_Size) ){
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, src1Contig, src2Contig, real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
    }
#else
    serial_path = 1;
#endif
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
  }
}
/* r_ = t + value * src1 / src2. Same copy-then-accumulate structure as
 * addcmul; zeros in src2 are not guarded (integer div-by-zero / IEEE
 * inf-NaN for floats). */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t src1Size = THTensor_(nElement)(src1);
  int64_t src2Size = THTensor_(nElement)(src2);
  int r_Contig = THTensor_(isContiguous)(r_);
  int src1Contig = THTensor_(isContiguous)(src1);
  int src2Contig = THTensor_(isContiguous)(src2);
  int serial_path = 0;
  if( (src1Size == src2Size) && (src1Size == r_Size) ){
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, src1Contig, src2Contig, real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
    }
#else
    serial_path = 1;
#endif
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
  }
}
/* r_ = beta * t + alpha * mat @ vec (matrix-vector multiply-accumulate).
 * Validates shapes (mat: 2D, vec/t: 1D, inner dims agree), copies t into
 * r_ when they differ, then dispatches to BLAS gemv choosing the layout
 * that avoids a copy:
 *  - mat column-major (stride[0]==1, valid leading dim) -> gemv 'n';
 *  - mat row-major (stride[1]==1, valid leading dim)    -> gemv 't'
 *    with m/n swapped, since BLAS is column-major;
 *  - otherwise materialize a contiguous (row-major) copy and use 't'.
 * LDA_COND encodes the BLAS requirement lda >= max(1, rows) (or a single
 * column, where lda is irrelevant). */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected, got %dD, %dD",
      mat->nDimension, vec->nDimension);
  if( mat->size[1] != vec->size[0] ) {
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THDescBuff bv = THTensor_(sizeDesc)(vec);
    THError("size mismatch, %s, %s", bm.str, bv.str);
  }
  if(t->nDimension != 1)
    THError("vector expected, got t: %dD", t->nDimension);
  if(t->size[0] != mat->size[0]) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
  }
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  // n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
  if(mat->stride[0] == 1 && LDA_COND(mat->size[0], mat->size[1], mat->stride[1]))
  {
    THBlas_(gemv)('n', mat->size[0], mat->size[1],
                  alpha, THTensor_(data)(mat), mat->stride[1],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1 && LDA_COND(mat->size[1], mat->size[0], mat->stride[0]))
  {
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(mat), mat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* neither layout usable directly: make a contiguous copy of mat */
    THTensor *cmat = THTensor_(newContiguous)(mat);
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(cmat), cmat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
    THTensor_(free)(cmat);
  }
#undef LDA_COND
}
/* r_[i][j] = gain * ||m1[i] - m2[j]||^2: pairwise squared Euclidean
 * distances between the rows of m1 (N1 x dim) and m2 (N2 x dim), scaled.
 * Inputs are flattened to 2D via contiguous copies (newContiguous +
 * resize2d operate on the copies, so the callers' tensors keep their
 * shapes); the copies are freed at the end. The outer loop over N1 rows is
 * parallelized with OpenMP — each thread writes a disjoint row of r_. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  int64_t N1 = m1->size[0];
  int64_t N2 = m2->size[0];
  int64_t dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  int64_t i;
  THTensor_(resize2d)(r_, N1, N2);
  /* shadow the parameters with contiguous copies; originals untouched */
  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);
  THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);
  #pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    int64_t j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }
  THTensor_(free)(m1);
  THTensor_(free)(m2);
}
/* r_ = beta * t + alpha * m1 @ m2 (matrix-matrix multiply-accumulate)
 * dispatched to BLAS gemm, which is column-major. The layout selection:
 *  - If r_ is already column-major (stride[0]==1 with a valid leading
 *    dimension) gemm writes into it directly.
 *  - If r_ is row-major, compute the transposed product instead: swap m1
 *    and m2 and mark transpose_r='t' — (m2^T m1^T) stored column-major is
 *    the row-major m1 m2.
 *  - Otherwise clone r_ into a fresh FORTRAN-contiguous buffer (r__) and
 *    copy the result back at the end via freeCopyTo.
 * Each operand is then used in place when either its rows or columns are
 * unit-stride with an adequate leading dimension, else replaced by a
 * contiguous copy (freed afterwards). When beta == 0 the copy of t into
 * r_ is skipped — gemm overwrites the output entirely in that case.
 * NOTE(review): the `#pragma omp critical(blasgemm)` serializes concurrent
 * gemm calls from parallel regions — presumably guarding a non-reentrant
 * BLAS; it applies to the single gemm statement that follows. */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
  char transpose_r, transpose_m1, transpose_m2;
  THTensor *r__, *m1_, *m2_;
  int free_m1 = 0;
  int free_m2 = 0;
  if( (m1->nDimension != 2) || (m2->nDimension != 2))
    THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
  if(m1->size[1] != m2->size[0]) {
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
  }
  if( t->nDimension != 2 )
    THError("matrix expected, got %dD tensor for t", t->nDimension);
  if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
  }
  if(t != r_)
  {
    THTensor_(resizeAs)(r_, t);
    if (beta != 0.0) {
      THTensor_(copy)(r_, t);
    }
  }
  // n == 1 || ldc >= max(1, m)
#define LDC_COND(M, N, LDC) ((N) == 1 || (LDC) >= THMax(1, M))
  /* r_ */
  if(r_->stride[0] == 1 &&
     LDC_COND(r_->size[0], r_->size[1], r_->stride[1]))
  {
    transpose_r = 'n';
    r__ = r_;
  }
  else if(r_->stride[1] == 1 &&
          LDC_COND(r_->size[1], r_->size[0], r_->stride[0]))
  {
    THTensor *swap = m2;
    m2 = m1;
    m1 = swap;
    transpose_r = 't';
    r__ = r_;
  }
  else
  {
    transpose_r = 'n';
    // make r__ FORTRAN contiguous
    THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
    r__ = THTensor_(newClone)(transp_r_);
    THTensor_(free)(transp_r_);
    THTensor_(transpose)(r__, NULL, 0, 1);
  }
#undef LDC_COND
  int64_t m = r__->size[(transpose_r == 'n' ? 0 : 1)];
  int64_t n = r__->size[(transpose_r == 'n' ? 1 : 0)];
  int64_t k = m1->size[(transpose_r == 'n' ? 1 : 0)];
  int64_t ldr__ = r__->stride[(transpose_r == 'n' ? 1 : 0)];
  /* m1 */
  /* Need ldm1_ >= max(1, (transpose_m1 == 'n' ? m : k)) */
  if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
     m1->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, m))
  {
    transpose_m1 = 'n';
    m1_ = m1;
  }
  else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
          m1->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, k))
  {
    transpose_m1 = 't';
    m1_ = m1;
  }
  else
  {
    transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
    m1_ = THTensor_(newContiguous)(m1);
    free_m1 = 1;
  }
  /* m2 */
  /* Need ldm2_ >= max(1, (transpose_m2 == 'n' ? k : n)) */
  if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
     m2->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, k))
  {
    transpose_m2 = 'n';
    m2_ = m2;
  }
  else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
          m2->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, n))
  {
    transpose_m2 = 't';
    m2_ = m2;
  }
  else
  {
    transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
    m2_ = THTensor_(newContiguous)(m2);
    free_m2 = 1;
  }
  int64_t ldm1_ = (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]);
  int64_t ldm2_ = (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]);
#pragma omp critical(blasgemm)
  /* do the operation */
  THBlas_(gemm)(transpose_m1,
                transpose_m2,
                m,
                n,
                k,
                alpha,
                THTensor_(data)(m1_),
                ldm1_,
                THTensor_(data)(m2_),
                ldm2_,
                beta,
                THTensor_(data)(r__),
                ldr__);
  /* free intermediate variables */
  if(free_m1)
    THTensor_(free)(m1_);
  if(free_m2)
    THTensor_(free)(m2_);
  if(r__ != r_)
    THTensor_(freeCopyTo)(r__, r_);
}
/* r_ = beta*t + alpha*(vec1 (x) vec2): rank-1 outer-product update via BLAS ger.
   t must be a matrix of size vec1->size[0] x vec2->size[0]; r_ may alias t. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected, got %dD, %dD tensors",
            vec1->nDimension, vec2->nDimension);
  if(t->nDimension != 2)
    THError("expected matrix, got %dD tensor for t", t->nDimension);
  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
    THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
    THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
  }
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  /* fold beta into r_ up front; ger itself only accumulates alpha*v1*v2^T */
  if(beta == 0) {
    THTensor_(zero)(r_);
  }
  else if(beta != 1)
    THTensor_(mul)(r_, r_, beta);
  // n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
  /* column-major r_: ger(m = len(vec1), n = len(vec2)) directly */
  if(r_->stride[0] == 1 && LDA_COND(vec1->size[0], vec2->size[0], r_->stride[1]))
  {
    THBlas_(ger)(vec1->size[0], vec2->size[0],
                 alpha, THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(r_), r_->stride[1]);
  }
  /* row-major r_: compute the transposed update by swapping the two vectors */
  else if(r_->stride[1] == 1 && LDA_COND(vec2->size[0], vec1->size[0], r_->stride[0]))
  {
    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(r_), r_->stride[0]);
  }
  /* neither layout is BLAS-compatible: update a contiguous clone, copy back */
  else
  {
    THTensor *cr = THTensor_(newClone)(r_);
    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(cr), cr->stride[0]);
    THTensor_(freeCopyTo)(cr, r_);
  }
#undef LDA_COND
}
/* result = beta*t + alpha*sum_b(batch1[b] @ batch2[b]): batched matrix
   products reduced (summed) into a single output matrix. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t b;
  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
  int64_t rows = THTensor_(size)(batch1, 1);
  int64_t cols = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == rows, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == cols, 1, "output tensor of incorrect size");
  if (t != result) {
    THTensor_(resizeAs)(result, t);
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }
  {
    THTensor *m1 = THTensor_(new)();
    THTensor *m2 = THTensor_(new)();
    int64_t numBatches = THTensor_(size)(batch1, 0);
    for (b = 0; b < numBatches; ++b) {
      THTensor_(select)(m1, batch1, 0, b);
      THTensor_(select)(m2, batch2, 0, b);
      THTensor_(addmm)(result, beta, result, alpha, m1, m2);
      /* the caller's beta applies only to the first product; later
         products accumulate into the running result */
      beta = 1;
    }
    THTensor_(free)(m1);
    THTensor_(free)(m2);
  }
}
/* result[b] = beta*t[b] + alpha*(batch1[b] @ batch2[b]) for each batch b:
   batched matrix multiply with per-batch scaling of t. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t b;
  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
  int64_t numBatches = THTensor_(size)(batch1, 0);
  int64_t rows = THTensor_(size)(batch1, 1);
  int64_t cols = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == numBatches, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == rows, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 2) == cols, 1, "output tensor of incorrect size");
  if (t != result) {
    THTensor_(resizeAs)(result, t);
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }
  {
    THTensor *m1 = THTensor_(new)();
    THTensor *m2 = THTensor_(new)();
    THTensor *out = THTensor_(new)();
    for (b = 0; b < numBatches; ++b) {
      THTensor_(select)(m1, batch1, 0, b);
      THTensor_(select)(m2, batch2, 0, b);
      THTensor_(select)(out, result, 0, b);
      /* independent GEMM per batch; beta/alpha apply to every batch */
      THTensor_(addmm)(out, beta, out, alpha, m1, m2);
    }
    THTensor_(free)(m1);
    THTensor_(free)(m2);
    THTensor_(free)(out);
  }
}
/* Total number of elements in t; thin alias of THTensor_(nElement). */
ptrdiff_t THTensor_(numel)(THTensor *t)
{
  return THTensor_(nElement)(t);
}
// Helper for reduction operations. If the caller passed an output tensor r_
// that already has the post-reduction shape (one fewer dimension than the
// input, keepdim off), re-insert the reduced dimension as size 1 so that the
// reduction can operate on r_ without a destructive resize — this preserves
// any noncontiguities the user's r_ had.
void THTensor_(preserveReduceDimSemantics)(
    THTensor *r_, int in_dims, int reduce_dimension, int keepdim) {
  if (!r_ || keepdim)
    return;
  int out_dims = THTensor_(nDimension)(r_);
  if (out_dims != in_dims - 1 || out_dims == 0)
    return;
  THTensor_(unsqueeze1d)(r_, r_, reduce_dimension);
}
/* Maximum of t along `dimension`: values_ receives the maxima, indices_ the
   position of each maximum within its slice.  NaNs propagate (a NaN in a
   slice becomes its result).  The reduced dim is squeezed unless keepdim. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);
  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* unit-stride slices: scan each slice linearly */
    real theMax;
    real value;
    int64_t theIndex;
    int64_t i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         theMax = t_data[0];
                         theIndex = 0;
                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value>theMax in the case of NaNs */
                           if(!(value <= theMax))
                           {
                             theIndex = i;
                             theMax = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMax;);
  } else {
    /* strided slices: seed values_ with slice 0, then sweep t through a
       stride-0 ("expanded") view of the outputs */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);
    if(t->size[dimension] == 1) {
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }
    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;
    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;
    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension,
                       if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) {
                         *tempValues__data = *t_data;
                         *tempIndices__data = *tempIndices__dimOffset;
                       });
    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Minimum of t along `dimension`: values_ receives the minima, indices_ the
   position of each minimum within its slice.  NaNs propagate (a NaN in a
   slice becomes its result).  The reduced dim is squeezed unless keepdim. */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *reducedSize;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  reducedSize = THTensor_(newSizeOf)(t);
  THLongStorage_set(reducedSize, dimension, 1);
  THTensor_(resize)(values_, reducedSize, NULL);
  THLongTensor_resize(indices_, reducedSize, NULL);
  THLongStorage_free(reducedSize);
  /* two implementations optimized for data locality */
  if (t->stride[dimension] == 1) {
    /* unit-stride slices: scan each slice linearly */
    real theMin;
    real curVal;
    int64_t minIndex;
    int64_t k;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         theMin = t_data[0];
                         minIndex = 0;
                         for(k = 0; k < t_size; k++)
                         {
                           curVal = t_data[k*t_stride];
                           /* "not >=" rather than "<" so a NaN replaces the running min */
                           if(!(curVal >= theMin))
                           {
                             minIndex = k;
                             theMin = curVal;
                             th_isnan_break(curVal)
                           }
                         }
                         *indices__data = minIndex;
                         *values__data = theMin;);
  } else {
    /* strided slices: seed values_ with slice 0, then sweep t through a
       stride-0 ("expanded") view of the outputs */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *firstSlice = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, firstSlice);
      THTensor_(free)(firstSlice);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);
    if(t->size[dimension] == 1) {
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }
    THTensor *expandedVals = THTensor_(newWithTensor)(values_);
    /* expandedVals.expand_as(t): stride 0 broadcasts along the reduced dim */
    expandedVals->size[dimension] = t->size[dimension];
    expandedVals->stride[dimension] = 0;
    THLongTensor *expandedIdx = THLongTensor_newWithTensor(indices_);
    /* expandedIdx.expand_as(t) */
    expandedIdx->size[dimension] = t->size[dimension];
    expandedIdx->stride[dimension] = 0;
    TH_TENSOR_APPLY3_D(real, t, real, expandedVals, int64_t, expandedIdx, dimension,
                       if(!(*t_data >= *expandedVals_data) && !th_isnan(*expandedVals_data)) {
                         *expandedVals_data = *t_data;
                         *expandedIdx_data = *expandedIdx_dimOffset;
                       });
    THTensor_(free)(expandedVals);
    THLongTensor_free(expandedIdx);
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Sum of t along `dimension` into r_ (accumulated in accreal).  Uses an
   OpenMP path when not nested in a parallel region and r_ is contiguous;
   otherwise falls back to the serial macro-based paths. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  int serial_path = 0;
#ifdef _OPENMP
  int inOMP = omp_in_parallel();
  if (inOMP) {
    /* already inside a parallel region: avoid nested parallelism */
    serial_path = 1;
  } else {
    int r_Contig = THTensor_(isContiguous)(r_);
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    if(r_Contig && (tp != rp)){
      ptrdiff_t iter = 0;
      ptrdiff_t r_Size = THTensor_(nElement)(r_);
      int r_Dim = r_->nDimension;
#pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
      for (iter = 0; iter < r_Size; iter++) {
        int j;
        int64_t quot;
        int64_t rem = iter;
        ptrdiff_t tBasicIndex = 0;
        /* decompose the flat output index into coordinates (the reduced
           dim is size 1 in r_) to locate the start of the input slice */
        for(j = 0; j < r_Dim; ++j) {
          if(j != dimension){
            quot = rem/r_->stride[j];
            rem = rem%r_->stride[j];
            tBasicIndex += quot*t->stride[j];
          }
        }
        real *t_data = tp+tBasicIndex;
        real *r__data = rp+iter;
        *r__data = 0;
        for(j=0; j < t->size[dimension]; ++j) {
          *r__data += *(t_data + j*t->stride[dimension]);
        }
      }
    } else {
      serial_path = 1;
    }
  }
#else
  serial_path = 1;
#endif
  if (serial_path) {
    // two implementations optimized for data locality
    if (t->stride[dimension] == 1) {
      TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                           accreal sum = 0;
                           int64_t i;
                           for(i = 0; i < t_size; i++)
                             sum += t_data[i*t_stride];
                           *r__data = (real)sum;);
    } else {
      THTensor_(zero)(r_);
      THTensor *temp_ = THTensor_(newWithTensor)(r_);
      // r_.expand_as(t)
      temp_->size[dimension] = t->size[dimension];
      temp_->stride[dimension] = 0;
      TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
      THTensor_(free)(temp_);
    }
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* Product of t along `dimension` into r_ (accumulated in accreal).
   Structure mirrors THTensor_(sum): OpenMP path when safe, else serial. */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  int serial_path = 0;
#ifdef _OPENMP
  int inOMP = omp_in_parallel();
  if (inOMP) {
    /* already inside a parallel region: avoid nested parallelism */
    serial_path = 1;
  } else {
    int r_Contig = THTensor_(isContiguous)(r_);
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    if(r_Contig && (tp != rp)){
      ptrdiff_t iter = 0;
      ptrdiff_t r_Size = THTensor_(nElement)(r_);
      int r_Dim = r_->nDimension;
#pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
      for (iter = 0; iter < r_Size; iter++) {
        int j;
        int64_t quot;
        int64_t rem = iter;
        ptrdiff_t tBasicIndex = 0;
        /* decompose the flat output index into coordinates (the reduced
           dim is size 1 in r_) to locate the start of the input slice */
        for(j = 0; j < r_Dim; ++j) {
          if(j != dimension){
            quot = rem/r_->stride[j];
            rem = rem%r_->stride[j];
            tBasicIndex += quot*t->stride[j];
          }
        }
        real *t_data = tp+tBasicIndex;
        real *r__data = rp+iter;
        *r__data = 1;
        for(j=0; j < t->size[dimension]; ++j) {
          *r__data *= *(t_data + j*t->stride[dimension]);
        }
      }
    } else {
      serial_path = 1;
    }
  }
#else
  serial_path = 1;
#endif
  if(serial_path) {
    // two implementations optimized for data locality
    if (t->stride[dimension] == 1) {
      TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                           accreal prod = 1;
                           int64_t i;
                           for(i = 0; i < t_size; i++)
                             prod *= t_data[i*t_stride];
                           *r__data = (real)prod;);
    } else {
      THTensor_(fill)(r_, 1);
      THTensor *temp_ = THTensor_(newWithTensor)(r_);
      // r_.expand_as(t)
      temp_->size[dimension] = t->size[dimension];
      temp_->stride[dimension] = 0;
      TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
      THTensor_(free)(temp_);
    }
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* Inclusive prefix sum along `dimension`; r_ takes t's full shape.
   Accumulation happens in accreal before narrowing back to real. */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}
/* Inclusive prefix product along `dimension`; r_ takes t's full shape.
   Accumulation happens in accreal before narrowing back to real. */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}
/* Elementwise sign of t into r_: -1 / 0 / +1.  For the unsigned byte type
   there is no -1, so negatives cannot occur and the result is 0 / 1. */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);
#if defined (TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY2(real, r_, real, t,
                   if (*t_data > 0) *r__data = 1;
                   else *r__data = 0;);
#else
  TH_TENSOR_APPLY2(real, r_, real, t,
                   if (*t_data > 0) *r__data = 1;
                   else if (*t_data < 0) *r__data = -1;
                   else *r__data = 0;);
#endif
}
/* Trace of a matrix: sum of the main-diagonal entries, accumulated in
   accreal.  For non-square matrices, min(rows, cols) entries are summed. */
accreal THTensor_(trace)(THTensor *t)
{
  real *base;
  accreal total;
  int64_t d, stride0, stride1, diagLen;
  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
  base = THTensor_(data)(t);
  stride0 = THTensor_(stride)(t, 0);
  stride1 = THTensor_(stride)(t, 1);
  diagLen = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
  total = 0;
  /* element (d, d) lives at offset d*(stride0 + stride1) */
  for(d = 0; d < diagLen; d++)
    total += base[d*(stride0+stride1)];
  return total;
}
/* 3D cross product of a and b along `dimension` into r_.  a and b must have
   identical sizes and the chosen dimension must have size 3; a negative
   `dimension` selects the first size-3 dimension of a. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;
  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD",
            THTensor_(nDimension)(a), THTensor_(nDimension)(b));
  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }
  /* dimension < 0: auto-detect the first dimension of size 3 */
  if(dimension < 0)
  {
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
             dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
             dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(r_, a);
  /* standard right-handed cross product per slice */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* Elementwise maximum of t and src into r. */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}
/* Elementwise minimum of t and src into r. */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}
/* Elementwise maximum of t and a scalar `value` into r (clamp from below). */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}
/* Elementwise minimum of t and a scalar `value` into r (clamp from above). */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ? *t_data : value;);
}
/* Resizes r_ to `size` and fills it with zeros. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with zeros. */
void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with ones. */
void THTensor_(onesLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(fill)(r_, 1);
}
/* Resizes r_ to `size` and fills it with ones. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}
/* diag with offset k: a 1D input produces a square matrix with the vector on
   the k-th diagonal (k > 0 above the main diagonal, k < 0 below); a 2D input
   produces a 1D tensor holding its k-th diagonal. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
  if(THTensor_(nDimension)(t) == 1)
  {
    /* vector -> matrix: zero-filled (size+|k|)^2, write the diagonal */
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_size = THTensor_(size)(t, 0);
    int64_t sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    int64_t r__stride_0;
    int64_t r__stride_1;
    int64_t i;
    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* shift the start right (k >= 0) or down (k < 0) by |k| */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    /* matrix -> vector: extract min-length run along the offset diagonal */
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_stride_1 = THTensor_(stride)(t, 1);
    int64_t sz;
    real *r__data;
    int64_t r__stride_0;
    int64_t i;
    if(k >= 0)
      sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
    else
      sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
    THTensor_(resize1d)(r_, sz);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}
/* Writes an n x m identity matrix into r_ (m defaults to n when m <= 0):
   zero everywhere, 1 on the main diagonal.
   Fix: removed the dead store `i = 0;` — the for loop reinitializes i. */
void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m)
{
  real *r__data;
  int64_t i, sz;
  THArgCheck(n > 0, 1, "invalid argument");
  if(m <= 0)
    m = n;
  THTensor_(resize2d)(r_, n, m);
  THTensor_(zero)(r_);
  r__data = THTensor_(data)(r_);
  /* only min(rows, cols) diagonal entries exist */
  sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
  for(i = 0; i < sz; i++)
    r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
/* Fills r_ with xmin, xmin+step, ... up to and including xmax
   (size = floor((xmax - xmin)/step) + 1).  step must be nonzero and its sign
   must agree with the direction from xmin to xmax.
   Fix: error message said "larger bound" where "lower bound" was meant. */
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
  ptrdiff_t size;
  real i = 0;
  THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and lower bound inconsistent with step sign");
  size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }
  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Fills r_ with xmin, xmin+step, ... with xmax EXCLUSIVE
   (size = ceil((xmax - xmin)/step)).  step must be nonzero and its sign
   must agree with the direction from xmin to xmax.
   Fix: error message said "larger bound" where "lower bound" was meant. */
void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) {
  ptrdiff_t size;
  real i = 0;
  THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and lower bound inconsistent with step sign");
  size = (ptrdiff_t) ceil((double)(xmax - xmin) / step);
  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }
  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Fills r_ with a uniformly random permutation of 0..n-1 using `_generator`
   (identity fill followed by a Fisher-Yates shuffle). */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n)
{
  real *data;
  int64_t stride0;
  int64_t k;
  THArgCheck(n > 0, 1, "must be strictly positive");
  THTensor_(resize1d)(r_, n);
  data = THTensor_(data)(r_);
  stride0 = THTensor_(stride)(r_,0);
  /* start from the identity permutation */
  for(k = 0; k < n; k++)
    data[k*stride0] = (real)(k);
  /* Fisher-Yates: swap slot k with a random slot in [k, n) */
  for(k = 0; k < n-1; k++)
  {
    int64_t pick = THRandom_random(_generator) % (n-k);
    real held = data[k*stride0];
    data[k*stride0] = data[(pick+k)*stride0];
    data[(pick+k)*stride0] = held;
  }
}
/* Resizes r_ to `size` and copies t's elements into it.
   NOTE(review): no explicit check here that `size` matches t's element
   count — presumably THTensor_(copy) handles/flags a mismatch; confirm. */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
   Sedgewick's 1978 "Implementing Quicksort Programs" article
   http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
   It is the state of the art existing implementation. The macros
   are here to make as close a match as possible to the pseudocode of
   Program 2 p.851
   Note that other partition schemes exist, and are typically presented
   in textbook, but those are less efficient. See e.g.
   http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
   Julien, November 12th 2013
*/
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
#define ARR_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ));
#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))
/* Ascending in-place quicksort of `elements` strided values in arr; idx is
   permuted in lockstep (BOTH_SWAP) so it tracks each value's provenance.
   Subfiles of <= M_SMALL elements are left unsorted by the quicksort phase
   and finished by the final insertion sort. */
static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;
  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;
  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) < piv);
      do { j = j-1; } while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);
    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */
  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) > ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) < piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
/* Descending counterpart of quicksortascend: identical structure with all
   value comparisons reversed; idx is permuted alongside arr. */
static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;
  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;
  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }
    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) > piv);
      do { j = j-1; } while(ARR(j) < piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);
    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */
  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) < ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) > piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
#undef MAX_LEVELS
#undef M_SMALL
/* Sorts t along `dimension`: rt_ receives the sorted values and ri_ the
   original position of each value within its slice; descendingOrder picks
   the direction.  Sorting is done in place on a copy of t. */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
             dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);
  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }
  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         /* seed indices 0..n-1, then co-sort them with values */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         /* seed indices 0..n-1, then co-sort them with values */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
   public domain implementation at http://ndevilla.free.fr/median/median/
   Adapted similarly to the above Quicksort algorithm.
   This version does not produce indices along with values.
   On return ARR(k) holds the k-th smallest element (arr is partially
   partitioned around it). */
static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j;
  real rswap, piv;
  L = 0;
  R = elements-1;
  do {
    if (R <= L) /* One element only */
      return;
    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        ARR_SWAP(L, R);
      }
      return;
    }
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    ARR_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); }
    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      ARR_SWAP(i, j);
    } while(1);
    ARR_SWAP(L, j);
    /* Re-set active partition: keep only the side containing index k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
   public domain implementation at http://ndevilla.free.fr/median/median/
   Adapted similarly to the above Quicksort algorithm.
   idx is permuted in lockstep with arr, so on return ARR(k)/IDX(k) give the
   k-th smallest value and its original position. */
static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j, swap;
  real rswap, piv;
  L = 0;
  R = elements-1;
  do {
    if (R <= L) /* One element only */
      return;
    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);
    /* Re-set active partition: keep only the side containing index k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
/* Mode (most frequent value) of t along `dimension`.  Each slice is copied
   into a scratch buffer, sorted, then scanned for the longest run of equal
   values; with tied frequencies the first (smallest) value wins because the
   comparison is strict.  indices_ receives the original slice position of
   the run's last element in sorted order. */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);
  t_size_dim = THTensor_(size)(t, dimension);
  /* scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);
  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);
  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       int64_t i;
                       real mode = 0;
                       int64_t modei = 0;
                       int64_t temp_freq = 0;
                       int64_t max_freq = 0;
                       for(i = 0; i < t_size_dim; i++)
                         temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                         tempi__data[i] = i;
                       THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);
                       /* count run lengths in the sorted copy */
                       for(i = 0; i < t_size_dim; i++)
                       {
                         temp_freq++;
                         if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
                         {
                           if (temp_freq > max_freq)
                           {
                             mode = temp__data[i];
                             modei = tempi__data[i];
                             max_freq = temp_freq;
                           }
                           temp_freq = 0;
                         }
                       }
                       *values__data = mode;
                       *indices__data = modei;);
  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* k-th smallest value (1-based k) of t along `dimension`, computed by
   quickselect on a scratch copy of each slice.  indices_ receives the
   element's original position within the slice. */
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, int64_t k, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");
  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);
  t_size_dim = THTensor_(size)(t, dimension);
  /* scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);
  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);
  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       int64_t i;
                       for(i = 0; i < t_size_dim; i++)
                         temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                         tempi__data[i] = i;
                       /* quickselect uses a 0-based rank */
                       THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
                       *values__data = temp__data[k-1];
                       *indices__data = tempi__data[k-1];);
  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Median along `dimension`, expressed as a kthvalue reduction.
 * For a slice of length n the element at sorted position floor((n-1)/2)
 * is chosen, i.e. the middle element for odd n and the lower of the two
 * middle elements for even n (no averaging). */
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  const int64_t slice_len = THTensor_(size)(t, dimension);
  const int64_t mid = (slice_len - 1) / 2; /* lower middle for even lengths */
  THTensor_(kthvalue)(values_, indices_, t, mid + 1, dimension, keepdim);
}
/* topk: for every slice along `dim`, select the k largest (dir != 0) or k
 * smallest (dir == 0) elements into rt_, with their slice-local indices in
 * ri_.  If `sorted` is nonzero the k results are ordered (descending for
 * largest, ascending for smallest); otherwise their order is unspecified.
 * Uses quickselect on a contiguous scratch copy, so t is not modified. */
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, int dim, int dir, int sorted)
{
int numDims = THTensor_(nDimension)(t);
THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");
int64_t sliceSize = THTensor_(size)(t, dim);
THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");
/* Scratch buffers shared by all slices. */
THTensor *tmpResults = THTensor_(new)();
THTensor_(resize1d)(tmpResults, sliceSize);
real *tmp__data = THTensor_(data)(tmpResults);
THLongTensor *tmpIndices = THLongTensor_new();
THLongTensor_resize1d(tmpIndices, sliceSize);
int64_t *tmpi__data = THLongTensor_data(tmpIndices);
/* Outputs have t's shape except size k along `dim`. */
THLongStorage *topKSize = THTensor_(newSizeOf)(t);
THLongStorage_set(topKSize, dim, k);
THTensor_(resize)(rt_, topKSize, NULL);
THLongTensor_resize(ri_, topKSize, NULL);
THLongStorage_free(topKSize);
if (dir) {
/* k largest elements, descending order (optional: see sorted) */
/* Partition at K = sliceSize - k so positions [K, sliceSize) hold the k
 * largest values. */
int64_t K = sliceSize - k;
TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
int64_t i;
for(i = 0; i < sliceSize; i++)
{
tmp__data[i] = t_data[i*t_stride];
tmpi__data[i] = i;
}
if (K > 0)
THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
if (sorted)
THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
for(i = 0; i < k; i++)
{
rt__data[i*rt__stride] = tmp__data[i + K];
ri__data[i*ri__stride] = tmpi__data[i + K];
})
}
else {
/* k smallest elements, ascending order (optional: see sorted) */
TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
int64_t i;
for(i = 0; i < sliceSize; i++)
{
tmp__data[i] = t_data[i*t_stride];
tmpi__data[i] = i;
}
/* quickselect leaves position k-1 in its final sorted place, so only the
 * first k-1 elements still need sorting below. */
THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
if (sorted)
THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
for(i = 0; i < k; i++)
{
rt__data[i*rt__stride] = tmp__data[i];
ri__data[i*ri__stride] = tmpi__data[i];
})
}
THTensor_(free)(tmpResults);
THLongTensor_free(tmpIndices);
}
/* Lower-triangular part of a 2-d tensor:
 *   r_[i][j] = t[i][j]  when j <= i + k,
 *   r_[i][j] = 0        otherwise.
 * k shifts the retained diagonal (k > 0 keeps diagonals above the main one,
 * k < 0 drops diagonals below it). */
void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k)
{
  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
  THTensor_(resizeAs)(r_, t);
  const int64_t rows = THTensor_(size)(t, 0);
  const int64_t cols = THTensor_(size)(t, 1);
  const int64_t ts0 = THTensor_(stride)(t, 0);
  const int64_t ts1 = THTensor_(stride)(t, 1);
  const int64_t rs0 = THTensor_(stride)(r_, 0);
  const int64_t rs1 = THTensor_(stride)(r_, 1);
  real *dst = THTensor_(data)(r_);
  real *src = THTensor_(data)(t);
  int64_t i, j;
  for (i = 0; i < rows; i++)
  {
    /* Columns [keep, cols) lie above the shifted diagonal and are zeroed;
     * columns [0, keep) are copied from t.  The two ranges never overlap. */
    const int64_t keep = THMin(i + k + 1, cols);
    for (j = THMax(0, i + k + 1); j < cols; j++)
      dst[i*rs0 + j*rs1] = 0;
    for (j = 0; j < keep; j++)
      dst[i*rs0 + j*rs1] = src[i*ts0 + j*ts1];
  }
}
/* Upper-triangular part of a 2-d tensor:
 *   r_[i][j] = t[i][j]  when j >= i + k,
 *   r_[i][j] = 0        otherwise.
 * k shifts the retained diagonal (k > 0 drops diagonals near the main one,
 * k < 0 keeps diagonals below it). */
void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k)
{
  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
  THTensor_(resizeAs)(r_, t);
  const int64_t rows = THTensor_(size)(t, 0);
  const int64_t cols = THTensor_(size)(t, 1);
  const int64_t ts0 = THTensor_(stride)(t, 0);
  const int64_t ts1 = THTensor_(stride)(t, 1);
  const int64_t rs0 = THTensor_(stride)(r_, 0);
  const int64_t rs1 = THTensor_(stride)(r_, 1);
  real *dst = THTensor_(data)(r_);
  real *src = THTensor_(data)(t);
  int64_t i, j;
  for (i = 0; i < rows; i++)
  {
    /* Columns [0, cleared) fall below the shifted diagonal and are zeroed;
     * columns [max(0, i+k), cols) are copied from t.  Ranges are disjoint. */
    const int64_t cleared = THMin(i + k, cols);
    for (j = THMax(0, i + k); j < cols; j++)
      dst[i*rs0 + j*rs1] = src[i*ts0 + j*ts1];
    for (j = 0; j < cleared; j++)
      dst[i*rs0 + j*rs1] = 0;
  }
}
/* Two-tensor concatenation along `dimension`; thin wrapper over catArray. */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor *pair[2] = { ta, tb };
  THTensor_(catArray)(r_, pair, 2, dimension);
}
void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension);
/* Argument validation for catArray: the two tensors must have the same rank
 * and identical sizes in every dimension except `dimension` (the cat dim). */
inline void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension)
{
  const int ndim = first->nDimension;
  THArgCheck(ndim == second->nDimension, 0,
             "Tensors must have same number of dimensions: got %d and %d",
             ndim, second->nDimension);
  for (int d = 0; d < ndim; d++) {
    if (d == dimension)
      continue;
    const int64_t sz_a = first->size[d];
    const int64_t sz_b = second->size[d];
    THArgCheck(sz_a == sz_b, 0,
               "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
               dimension, (long long)sz_a, (long long)sz_b, d);
  }
}
/* Concatenate numInputs tensors along `dimension` into `result`.
 * Empty (0-dimensional) inputs are skipped; if every input is empty the
 * function returns without touching `result`.  All non-empty inputs must
 * agree in every dimension except the cat dimension.
 * Fast path: when concatenating along dim 0 with all-contiguous operands the
 * copy degenerates to back-to-back memcpy; otherwise each input is copied
 * through a narrowed view of `result`. */
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
// Find a non-empty tensor to record nDims
int allEmpty = 1;
int nDims = 0;
THTensor *notEmptyTensor;
for (int i = 0; i < numInputs; i++) {
int input_dims = inputs[i]->nDimension;
if (input_dims == 0) {
continue;
}
// We've found a non-empty tensor
allEmpty = 0;
notEmptyTensor = inputs[i];
nDims = input_dims;
break;
}
if (allEmpty) {
return;
}
// Compute cat_dimension based on the non-empty tensor
THArgCheck(dimension >= -1 && dimension < nDims, 4, "invalid dimension %d", dimension);
// When the user input dimension is -1 (i.e. -2 in C)
// Then we pick the last dimension across non-empty tensors.
// (TH_INDEX_BASE converts from the Lua-style 1-based API index.)
int cat_dimension = dimension;
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? nDims - 1 : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THTensor *tensor = inputs[i];
if (tensor->nDimension == 0) {
continue;
}
// Also validates that the non-cat dimensions match notEmptyTensor.
THTensor_(check_shape_except_dim)(notEmptyTensor, tensor, cat_dimension);
cat_dim_size += tensor->size[cat_dimension];
}
// Compute the size of the result
THLongStorage *size = THLongStorage_newWithSize(nDims);
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = notEmptyTensor->size[dim];
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
size->data[dim] = result_dim_size;
}
THTensor_(resize)(result, size, NULL);
// Check contiguity of all inputs and result
int allContiguous = 1;
for (int i = 0; i < numInputs; i++) {
if(inputs[i]->nDimension) {
allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
}
}
allContiguous = allContiguous && THTensor_(isContiguous)(result);
// First path is for contiguous inputs along dim 0
// Second path for non-contiguous
int64_t offset;
if (cat_dimension == 0 && allContiguous) {
// memcpy fast path: inputs are laid out end-to-end in result's storage.
real* result_data = result->storage->data + result->storageOffset;
offset = 0;
for (int j = 0; j < numInputs; j++) {
if (inputs[j]->nDimension) {
THTensor* input0 = inputs[j];
real* input0_data = input0->storage->data + input0->storageOffset;
int64_t input0_size = THTensor_(nElement)(input0);
memcpy(result_data + offset, input0_data, input0_size*sizeof(real));
offset += input0_size;
}
}
} else {
// General path: copy each input into a narrow()ed window of result.
offset = 0;
for (int j = 0; j < numInputs; j++) {
if (inputs[j]->nDimension) {
int64_t dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1;
THTensor *nt = THTensor_(newWithTensor)(result);
THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize);
THTensor_(copy)(nt, inputs[j]);
THTensor_(free)(nt);
offset += dimSize;
}
}
}
THLongStorage_free(size);
}
/* Returns 1 iff ta and tb have identical sizes and element-wise identical
 * values, 0 otherwise.  Uses bitwise-style `!=` on `real`, so NaN != NaN
 * makes tensors containing NaN compare unequal to themselves. */
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
int equal = 1;
if(!THTensor_(isSameSizeAs)(ta, tb))
return 0;
if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
/* Fast path: both buffers are dense, compare linearly. */
real *tap = THTensor_(data)(ta);
real *tbp = THTensor_(data)(tb);
ptrdiff_t sz = THTensor_(nElement)(ta);
ptrdiff_t i;
for (i=0; i<sz; ++i){
if(tap[i] != tbp[i]) return 0;
}
} else {
// Short-circuit the apply function on inequality
// (TH_TENSOR_APPLY_hasFinished is the macro's internal loop flag.)
TH_TENSOR_APPLY2(real, ta, real, tb,
if (equal && *ta_data != *tb_data) {
equal = 0;
TH_TENSOR_APPLY_hasFinished = 1; break;
})
}
return equal;
}
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
{ \
THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \
TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
*r__data = (*t_data OP value) ? 1 : 0;); \
} \
void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
{ \
THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \
TH_TENSOR_APPLY2(real, r_, real, t, \
*r__data = (*t_data OP value) ? 1 : 0;); \
} \
void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \
TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
*r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
} \
void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \
TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
*r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
} \
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
/* LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) generates the elementwise unary
 * op  r_[i] = CFUNC(t[i]);  LAB_IMPLEMENT_VECTORIZED_FUNCTION additionally
 * dispatches to the THVector_(NAME) SIMD kernel when both operands are
 * contiguous.  The _OPENMP variants parallelize large tensors unless already
 * inside a parallel region.
 *
 * BUG FIX: in the non-OpenMP branch both macro definitions ended with a
 * trailing backslash after the closing `}`.  Because backslash-newline
 * splicing precedes directive recognition, the first continuation merged the
 * following `#define` into LAB_IMPLEMENT_BASIC_FUNCTION's body and the second
 * merged `#endif` into LAB_IMPLEMENT_VECTORIZED_FUNCTION's body, breaking the
 * preprocess.  Both stray continuations are removed. */
#ifdef _OPENMP
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
ptrdiff_t r_Size = THTensor_(nElement)(r_); \
int r_Contig = THTensor_(isContiguous)(r_); \
int tContig = THTensor_(isContiguous)(t); \
int inOMP = omp_in_parallel(); \
if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \
} \
else { \
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \
} \
}
#define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
ptrdiff_t r_Size = THTensor_(nElement)(r_); \
int r_Contig = THTensor_(isContiguous)(r_); \
int tContig = THTensor_(isContiguous)(t); \
if (r_Contig && tContig) { \
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \
} else { \
int inOMP = omp_in_parallel(); \
if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \
} \
else { \
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \
} \
} \
}
#else
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
}
#define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
int r_Contig = THTensor_(isContiguous)(r_); \
int tContig = THTensor_(isContiguous)(t); \
if (r_Contig && tContig) { \
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \
} else { \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
} \
}
#endif
/* Unary negation exists for every real type; absolute value picks the libc
 * variant matching the element width. */
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
#endif /* int64_t only part */
#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)
/* Byte tensors act as boolean masks: fold all elements with &&/||. */
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
int THTensor_(NAME)(THTensor *tensor) \
{ \
int sum = INIT_VALUE; \
TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
return sum; \
}
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)
#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
/* TH_MATH_NAME(fn) selects the correct libm symbol for the element type:
 * fn##f (e.g. logf) for float builds, plain fn for double builds. */
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif
/* Standard elementwise transcendental / rounding functions. */
LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log))
LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma))
LAB_IMPLEMENT_BASIC_FUNCTION(digamma,TH_MATH_NAME(TH_digamma))
LAB_IMPLEMENT_BASIC_FUNCTION(trigamma,TH_MATH_NAME(TH_trigamma))
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p))
LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp))
LAB_IMPLEMENT_BASIC_FUNCTION(expm1,TH_MATH_NAME(expm1))
LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos))
LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos))
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh))
LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin))
LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin))
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh))
LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan))
LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan))
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh))
LAB_IMPLEMENT_BASIC_FUNCTION(erf,TH_MATH_NAME(erf))
LAB_IMPLEMENT_BASIC_FUNCTION(erfinv,TH_erfinv)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor))
LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round))
LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs))
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc))
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac))
/* cinv abuses the CFUNC slot: CFUNC(*t_data) expands to
 * TH_MATH_NAME(1.0) / (*t_data), i.e. the elementwise reciprocal. */
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / )
LAB_IMPLEMENT_VECTORIZED_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid))
/* Elementwise atan2(tx, ty): quadrant-aware arctangent of tx/ty.
 * r_ is resized to tx's shape; ty's shape is validated by the APPLY3 macro. */
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data););
}
/* Elementwise polygamma function psi^(n)(t).  Only orders n = 0 (digamma)
 * and n = 1 (trigamma) are implemented; anything else raises an error. */
void THTensor_(polygamma)(THTensor *r_, int64_t n, THTensor *t) {
  if (n == 0) {
    THTensor_(digamma)(r_, t);
  } else if (n == 1) {
    THTensor_(trigamma)(r_, t);
  } else {
    THError("polygamma(n,x) is not implemented for n>=2");
  }
}
/* Elementwise linear interpolation: r_ = a + weight * (b - a).
 * a and b must contain the same number of elements. */
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
THTensor_(resizeAs)(r_, a);
TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight););
}
/* Mean along `dimension`: delegates shape handling (including keepdim) to
 * sum, then divides in place by the reduced dimension's length. */
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
             dimension + TH_INDEX_BASE);
  THTensor_(sum)(r_, t, dimension, keepdim);
  /* Read the size after the reduction so aliasing of r_ and t behaves as
   * before. */
  const int64_t dim_len = t->size[dimension];
  THTensor_(div)(r_, r_, dim_len);
}
/* Standard deviation along `dimension` using Welford's single-pass update
 * for numerical stability.  `biased` selects the population estimator
 * (divide by n) vs the sample estimator (divide by n-1).  A length-1 slice
 * yields 0 for the biased case and NaN for the unbiased case. */
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
// Uses Welford's algorithm for numeric stability
accreal mean = 0;
accreal M2 = 0;
int64_t i;
for (i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
real delta = z - mean;
mean += delta / (i + 1);
real delta2 = z - mean;
M2 += delta * delta2;
}
if (biased && t_size >= 2)
{
*r__data = TH_MATH_NAME(sqrt)(M2 / t_size);
} else if (!biased && t_size >= 2) {
*r__data = TH_MATH_NAME(sqrt)(M2 / (t_size - 1));
} else if (biased && t_size == 1) {
*r__data = 0;
} else {
/* unbiased with a single sample is undefined */
*r__data = NAN;
});
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
/* Variance along `dimension`; identical structure to THTensor_(std) above
 * but without the final square root.  Welford's single-pass recurrence keeps
 * the accumulation numerically stable. */
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
// Uses Welford's algorithm for numeric stability
accreal mean = 0;
accreal M2 = 0;
int64_t i;
for (i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
real delta = z - mean;
mean += delta / (i + 1);
real delta2 = z - mean;
M2 += delta * delta2;
}
if (biased && t_size >= 2)
{
*r__data = M2 / t_size;
} else if (!biased && t_size >= 2) {
*r__data = M2 / (t_size - 1);
} else if (biased && t_size == 1) {
*r__data = 0;
} else {
/* unbiased with a single sample is undefined */
*r__data = NAN;
});
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
/* p-norm reduction along `dimension`:
 *   value == 0 -> count of nonzero elements (the "0-norm")
 *   value == 1 -> sum of |x|
 *   value == 2 -> sqrt(sum of x^2)
 *   value == 3 -> cbrt(sum of |x^3|)
 *   otherwise  -> (sum of |x|^value)^(1/value)
 * The small-integer cases avoid the general pow() for speed.
 *
 * BUG FIX: the DIM_REDUCE helper macro's last line ended with a trailing
 * backslash; backslash-newline splicing merged the following `if(value == 0)`
 * statement into the macro body, corrupting every expansion.  The stray
 * continuation is removed. */
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
/* DIM_REDUCE(reduce, transform): accumulate `sum` over the slice with
 * `reduce`, then store the result with `transform`. */
#define DIM_REDUCE(reduce, transform) \
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, \
accreal sum = 0; \
int64_t i; \
for(i = 0; i < t_size; i++) { \
(reduce); \
} \
(transform);)
if(value == 0) {
DIM_REDUCE(sum += t_data[i*t_stride] != 0.0,
*r__data = sum);
} else if (value == 1) {
DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride]),
*r__data = sum);
} else if (value == 2) {
DIM_REDUCE(sum += t_data[i*t_stride] * t_data[i*t_stride],
*r__data = TH_MATH_NAME(sqrt)(sum));
} else if (value == 3) {
DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride] * t_data[i*t_stride] * t_data[i*t_stride]),
*r__data = TH_MATH_NAME(pow)(sum, 1.0/3));
} else {
DIM_REDUCE(sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(t_data[i*t_stride]), value),
*r__data = TH_MATH_NAME(pow)(sum, 1.0/value));
}
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
#undef DIM_REDUCE
}
/* p-norm over ALL elements; same special cases as THTensor_(norm) above.
 * Accumulation is done in accreal (double for float builds), so the plain
 * double-precision sqrt()/std::abs() calls here are intentional. */
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
/* 0-"norm": number of nonzero entries */
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else if(value == 1) {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data););
return sum;
} else if(value == 2) {
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
return sqrt(sum);
} else if(value == 3) {
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += std::abs(z*z*z););
return TH_MATH_NAME(pow)(sum, 1.0/3);
} else {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value););
return TH_MATH_NAME(pow)(sum, 1.0/value);
}
}
/* renorm: for each slice of src along `dimension`, compute its p-norm
 * (p = value) over the remaining dims; slices whose norm exceeds maxnorm are
 * scaled down to (approximately) maxnorm, the rest are copied unchanged.
 * The 1e-7 in the rescale factor guards against division blow-up and keeps
 * the result strictly below maxnorm.
 * NOTE(review): fabs()/pow() here are the double-precision libm calls rather
 * than TH_MATH_NAME variants used elsewhere -- numerically fine (promotes
 * float), just stylistically inconsistent; confirm before "fixing". */
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
int i;
THTensor *rowR, *rowS;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
THTensor_(nDimension)(src));
rowR = THTensor_(new)();
rowS = THTensor_(new)();
THTensor_(resizeAs)(res, src);
for (i=0; i<src->size[dimension]; i++)
{
real norm = 0;
real new_norm;
/* rowS/rowR are reusable views over slice i of src and res. */
THTensor_(select)(rowS, src, dimension, i);
THTensor_(select)(rowR, res, dimension, i);
if (value == 1) {
TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
} else if (value == 2) {
TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
} else {
TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value););
}
norm = pow(norm, 1/value);
if (norm > maxnorm)
{
new_norm = maxnorm / (norm + 1e-7);
TH_TENSOR_APPLY2(
real, rowR, real, rowS,
*rowR_data = (*rowS_data) * new_norm;
)
}
else
THTensor_(copy)(rowR, rowS);
}
THTensor_(free)(rowR);
THTensor_(free)(rowS);
}
/* p-distance between two tensors: (sum |a_i - b_i|^value)^(1/value).
 * BUG FIX: the accumulator was declared `real`, but the function returns
 * `accreal`; in float builds a float accumulator loses precision and can
 * overflow on large tensors.  Accumulate in accreal instead, matching
 * normall() above. */
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
  accreal sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
      sum += TH_MATH_NAME(pow)(
          TH_MATH_NAME(fabs)(*tensor_data - *src_data), value););
  return TH_MATH_NAME(pow)(sum, 1.0/value);
}
/* Mean over all elements; rejects empty (0-dimensional) tensors up front to
 * avoid a 0/0. */
accreal THTensor_(meanall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
  const accreal total = THTensor_(sumall)(tensor);
  return total / THTensor_(nElement)(tensor);
}
/* Variance over all elements (two-pass: mean, then sum of squared
 * deviations).  `biased` selects /n vs /(n-1).
 * NOTE(review): for a single-element tensor with biased == 0 the divisor is
 * zero -- callers appear expected to avoid that case; confirm. */
accreal THTensor_(varall)(THTensor *tensor, int biased)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= THTensor_(nElement)(tensor) - (biased ? 0 : 1);
return sum;
}
/* Standard deviation over all elements: square root of varall. */
accreal THTensor_(stdall)(THTensor *tensor, int biased)
{
  const accreal variance = THTensor_(varall)(tensor, biased);
  return sqrt(variance);
}
/* Fill r_ with n evenly spaced points from a to b inclusive.  n == 1 is only
 * valid when a == b.  The running index `i` is kept in `real` because it is
 * consumed directly in the real-typed interpolation expression; relies on
 * TH_TENSOR_APPLY visiting elements in linear order. */
void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, a);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + (b-a)/((real)(n-1))*i;
i++;
);
}
}
/* Fill r_ with n points logarithmically spaced between 10^a and 10^b
 * inclusive; mirrors linspace but exponentiates each interpolated value. */
void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
/* Resize r_ to `size` and fill with uniform samples on [0, 1). */
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, _generator, 0, 1);
}
/* Resize r_ to `size` and fill with standard-normal samples (mean 0, std 1). */
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, _generator, 0, 1);
}
/* Histogram of all elements into nbins equal-width bins on
 * [minvalue, maxvalue].  If minvalue == maxvalue the range is taken from the
 * data; if the data is constant the range is widened by +/-1 so everything
 * lands in one bin.  Values outside the range are ignored; the top edge maps
 * into the last bin via the THMin clamp. */
void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
real minval;
real maxval;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data >= minval && *tensor_data <= maxval) {
const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
h_data[THMin(bin, nbins-1)] += 1;
}
);
}
/* Batched histogram: input must be 2-d (batch x values); one histogram of
 * nbins bins is computed per row into hist (batch x nbins).  Range handling
 * matches histc, but note the min/max fallback is computed over the WHOLE
 * tensor, not per row. */
void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor));
/* dimension 1 is the per-row value axis; combined with the check above this
 * effectively requires a 2-d input. */
int dimension = 1;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
real minval;
real maxval;
THTensor_(resize2d)(hist, tensor->size[0], nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, int64_t i;
for(i = 0; i < tensor_size; i++)
{
if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) {
const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins);
hist_data[THMin(bin, nbins-1)] += 1;
}
}
);
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes x is close to zero and uses a Taylor expansion.
// The 10-term series below follows the expansion of the incomplete beta
// function around x = 0; a NaN result (series blow-up outside the valid
// region) is mapped to 0 so callers get a safe gradient.
static inline real THTensor_(beta_grad_alpha_small)(real x, real alpha, real beta) {
const real factor = TH_digamma(alpha) - TH_digamma(alpha + beta) - TH_MATH_NAME(log)(x);
real numer = 1;
real series = numer / alpha * (factor + 1 / alpha);
for (int i = 1; i <= 10; ++i) {
/* numer accumulates the rising-factorial / x^i coefficient of term i */
numer *= (i - beta) * x / i;
const real denom = alpha + i;
series += numer / denom * (factor + 1 / denom);
}
const real result = x * TH_MATH_NAME(pow)(1 - x, -beta) * series;
return th_isnan(result) ? 0.0 : result;
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta.
// Assumes x is close to zero and uses a Taylor expansion.
// betas/dbetas track the falling factorial of beta and its derivative so the
// series carries both the value and the d/dbeta term; NaN maps to 0 as above.
static inline real THTensor_(beta_grad_beta_small)(real x, real alpha, real beta) {
const real factor = TH_digamma(alpha+beta) - TH_digamma(beta);
real numer = 1;
real betas = 1;
real dbetas = 0;
real series = factor / alpha;
for (int i = 1; i <= 8; ++i) {
numer *= -x / i;
dbetas = dbetas * (beta - i) + betas;
betas = betas * (beta - i);
series += numer / (alpha + i) * (dbetas + factor * betas);
}
const real result = -TH_MATH_NAME(pow)(1 - x, 1 - beta) * series;
return th_isnan(result) ? 0.0 : result;
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes alpha and beta are both large and uses a Rice saddle point expansion.
// To ensure numerical stability, this computation is performed at higher precision.
// (Hence double parameters/locals regardless of the tensor's `real` type.)
static inline real THTensor_(beta_grad_alpha_mid)(double x, double alpha, double beta) {
const double total = alpha + beta;
const double mean = alpha / total;
const double std = sqrt(alpha * beta / (total + 1)) / total;
if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
// Avoid the singularity at x = mean.
// Polynomial fit of the expansion in the near-mean window.
const double poly = 47 * x * (beta*beta)*(beta*beta) + alpha * (
(43 + 20 * (16 + 27 * beta) * x) * (beta*beta)*beta + alpha * (
3 * (59 + 180 * beta - 90 * x) * (beta*beta) + alpha * (
(453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
8 * (1 - x) * (135 * beta - 11)))));
const double prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
const double prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
return prefactor_num / (1 - x) * poly / prefactor_den;
}
// General saddle-point branch: Stirling-corrected prefactor times the
// leading terms of the expansion; term4's sign flips across the mean.
const double prefactor = -x / sqrt(2 * alpha * beta / total);
const double stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha*alpha))
* (1 + 1 / (12 * beta) + 1 / (288 * beta*beta))
/ (1 + 1 / (12 * total) + 1 / (288 * total*total));
const double term1_num = 2 * (alpha*alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta*beta);
const double axbx = alpha * (x-1) + beta * x;
const double term1_den = sqrt(2 * alpha / beta) * pow(total, 1.5f) * axbx*axbx;
const double term1 = term1_num / term1_den;
const double term2 = 0.5f * log(alpha / (total * x));
const double term3_num = sqrt(8 * alpha * beta / total);
const double term3_den = beta * x + alpha * (x - 1);
const double term3 = term3_num / term3_den;
const double term4_base = beta * log(beta / (total * (1 - x))) +
alpha * log(alpha / (total * x));
const double term4 = pow(term4_base, -1.5f);
const double term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
return stirling * prefactor * term1234;
}
// Computes a scaled reparameterized gradient
// -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
// for random number x drawn from a Beta distribution Beta(alpha,beta).
// This function inputs total=alpha+beta to make it easy to implement
// Dirichlet reparameterized gradients in terms of Betas.
// Dispatches to one of three asymptotic regimes and otherwise applies a
// fitted rational correction (the c[][] table) to an analytic approximation.
static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) {
const real beta = total - alpha;
const real boundary = total * x * (1 - x);
// Use an asymptotic approximation for x close to 0.
if (x <= 0.5f && boundary < 2.5f) {
return THTensor_(beta_grad_alpha_small)(x, alpha, beta);
}
// Use an asymptotic approximation for x close to 1.
// (By Beta symmetry: grad wrt alpha at x == -grad wrt beta at 1-x.)
if (x >= 0.5f && boundary < 0.75f) {
return -THTensor_(beta_grad_beta_small)(1 - x, beta, alpha);
}
// Use an asymptotic approximation when alpha and (total - alpha) are both large.
if (alpha > 6 && beta > 6) {
return THTensor_(beta_grad_alpha_mid)(x, alpha, beta);
}
// Use a rational correction to an analytic approximation.
// c[0] holds numerator coefficients, c[1] denominator coefficients, fitted
// as cubics in b over a 3x3 grid of powers of u and a (defined below).
static const real c[2][3][3][4] = {
{{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
{0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
{-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
{{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
{-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
{0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
{{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
{0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
{0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
{{{1, -0.02924021934, -0.04438342661, 0.007285809825},
{0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
{-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
{{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
{0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
{-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
{{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
{0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
{-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
};
const real u = TH_MATH_NAME(log)(x);
const real a = TH_MATH_NAME(log)(alpha) - u;
const real b = TH_MATH_NAME(log)(total) - a;
const real pow_u[3] = {1, u, u * u};
const real pow_a[3] = {1, a, a * a};
real p = 0.0;
real q = 0.0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
const real ua = pow_u[i] * pow_a[j];
p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
}
}
// p/q is the fitted correction applied to the analytic approximation.
const real approx = x * (TH_digamma(total) - TH_digamma(alpha)) / beta;
return p / q * approx;
}
/* Elementwise dirichlet_grad_one over x/alpha/total (all same size), result
 * in self.  Inputs are made contiguous so the flat OpenMP loop is valid.
 * NOTE(review): the newContiguous() copies of x/alpha/total are never freed
 * here when they are fresh copies -- confirm whether the surrounding TH
 * memory conventions reclaim them, otherwise this leaks. */
void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THTensor *total)
{
x = THTensor_(newContiguous)(x);
alpha = THTensor_(newContiguous)(alpha);
total = THTensor_(newContiguous)(total);
TH_CHECK_SAME_SIZE(alpha, x);
TH_CHECK_SAME_SIZE(total, x);
THTensor_(resizeAs)(self, x);
/* grad is a contiguous staging buffer; freeCopyTo writes it back to self. */
THTensor* grad = THTensor_(newContiguous)(self);
real*const grad_data = THTensor_(data)(grad);
real*const x_data = THTensor_(data)(x);
real*const alpha_data = THTensor_(data)(alpha);
real*const total_data = THTensor_(data)(total);
const int64_t numel = THTensor_(nElement)(x);
int64_t i;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for(i = 0; i < numel; ++i) {
grad_data[i] = THTensor_(dirichlet_grad_one)(x_data[i], alpha_data[i], total_data[i]);
}
THTensor_(freeCopyTo)(grad, self);
}
#undef TH_MATH_NAME
#endif /* floating point only part */
#undef IS_NONZERO
#endif
|
known_hosts_fmt_plug.c | /* Quick-and-dirty cracker for ~/.ssh/known_hosts hashes (HashKnownHosts yes).
*
* Based on http://blog.tremily.us/posts/known_hosts/
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Significant speedup Dec 2014, JimF. OMPSCALE was way off, and:
* NOTE Appears that salt and password are reversed?? With this info, salt was
* redone, to compute the first half of the HMAC, and double the speed.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_known_hosts;
#elif FMT_REGISTERS_H
john_register_one(&fmt_known_hosts);
#else
#include "sha.h"
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "known_hosts"
#define FORMAT_TAG "$known_hosts$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_NAME "HashKnownHosts HMAC-SHA1"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 20
#define BINARY_ENCODED_SIZE 28
#define PAD_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: hashed "|1|salt|hmac" lines taken from real
 * known_hosts files; the "plaintext" is the host address that was hashed. */
static struct fmt_tests known_hosts_tests[] = {
	{"$known_hosts$|1|yivSFSAv9mhGu/GPc14KpaPMSjE=|I9L3FH6RGefWIFb0Po74BVN3Fto=", "213.100.98.219"},
	{"$known_hosts$|1|pgjIzNM77FYsBHLfKvvG9aWpKAA=|XbHqTCXG1JAV6fb2h2HT8MT7kGU=", "192.30.252.130"},
	{"$known_hosts$|1|vAQX51f9EfXY33/j3upxFIlI1ds=|q+CzSLaa1EaSsAQzP/XRM/gaFQ4=", "192.30.252.128"},
	{"$known_hosts$|1|F1E1KeoE/eEWhi10WpGv4OdiO6Y=|3988QV0VE8wmZL7suNrYQLITLCg=", "192.168.1.61"},
	{NULL}
};
/* Candidate host strings, one slot per key index */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* HMAC-SHA1 results, one 20-byte digest per key index */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Per-salt data: SHA-1 contexts with the inner/outer HMAC pad blocks
 * already hashed (see get_salt), so crypt_all only has to append the
 * candidate and finalize. */
static struct custom_salt {
	SHA_CTX ipad_ctx;
	SHA_CTX opad_ctx;
} *cur_salt;
/* Allocate the per-candidate key and result buffers.  With OpenMP the
 * keys-per-crypt limits are scaled by thread count (min) and by
 * thread count * OMP_SCALE (max) so each thread gets a large batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init() (MEM_FREE also NULLs them). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Structural check of a candidate hash line.
 * Expected form: $known_hosts$|1|<28 base64 chars>|<28 base64 chars>
 * Returns 1 if the ciphertext looks valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	p = ciphertext + TAG_LENGTH;
	/* "|1|" prefix: only HashKnownHosts version 1 (HMAC-SHA1) exists */
	if (p[0] != '|' || p[1] != '1' || p[2] != '|')
		return 0;
	p += 3;
	/* base64 salt field, terminated by '|' */
	q = strchr(p, '|');
	if (!q || q - p != BINARY_ENCODED_SIZE) /* BUGFIX: q may be NULL */
		return 0;
	/* base64 HMAC field: everything after the last '|' (q above
	 * guarantees strrchr finds one) */
	p = strrchr(ciphertext, '|') + 1;
	if (strlen(p) != BINARY_ENCODED_SIZE)
		return 0;
	return 1;
}
/* Parse the salt field and precompute the two half-finished HMAC-SHA1
 * contexts.  In HashKnownHosts the 20-byte "salt" acts as the HMAC key,
 * so SHA1(key^ipad-block) and SHA1(key^opad-block) can be computed once
 * per salt instead of once per candidate (this is the speedup noted in
 * the file header). */
static void *get_salt(char *ciphertext)
{
	char *p, *q;
	/* salt buffer has slack for base64 decode overrun (28 chars -> 21 bytes) */
	unsigned char ipad[20], opad[20], salt[20 + 4 + 1];
	int i;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	p = ciphertext + TAG_LENGTH + 3;
	q = strchr(p, '|');
	base64_decode(p, q - p, (char*)salt);
	/* XOR the 20 key bytes with the HMAC inner/outer pad constants */
	for (i = 0; i < 20; ++i) {
		ipad[i] = salt[i] ^ 0x36;
		opad[i] = salt[i] ^ 0x5C;
	}
	SHA1_Init(&cs.ipad_ctx);
	SHA1_Update(&cs.ipad_ctx, ipad, 20);
	/* remaining 44 bytes of the 64-byte SHA-1 block: 0x00 ^ 0x36 */
	SHA1_Update(&cs.ipad_ctx, "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 44);
	SHA1_Init(&cs.opad_ctx);
	SHA1_Update(&cs.opad_ctx, opad, 20);
	/* remaining 44 bytes of the outer block: 0x00 ^ 0x5C */
	SHA1_Update(&cs.opad_ctx, "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 44);
	return (void *)&cs;
}
/* Decode the last '|'-separated base64 field of the ciphertext into the
 * raw 20-byte HMAC.  Returns a pointer to a static, word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1 + 4];
		ARCH_WORD dummy;
	} buf;
	char *encoded = strrchr(ciphertext, '|') + 1;

	base64_decode(encoded, BINARY_ENCODED_SIZE, (char*)buf.c);
	return buf.c;
}
/* Hash-table lookup helpers: mask increasing numbers of low bits of the
 * first 32-bit word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt (precomputed HMAC contexts) for subsequent crypt_all calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute HMAC-SHA1(salt, host) for every queued candidate.
 * The inner/outer contexts already contain the padded key block (see
 * get_salt), so each candidate costs only two short SHA-1 finishes. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		SHA_CTX ctx;
		/* inner hash: SHA1((key^ipad-block) || message) */
		memcpy(&ctx, &cur_salt->ipad_ctx, sizeof(ctx));
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Final((unsigned char*) crypt_out[index], &ctx);
		/* outer hash: SHA1((key^opad-block) || inner digest) */
		memcpy(&ctx, &cur_salt->opad_ctx, sizeof(ctx));
		SHA1_Update(&ctx, crypt_out[index], BINARY_SIZE);
		SHA1_Final((unsigned char*) crypt_out[index], &ctx);
	}
	return count;
}
/* Cheap first-pass compare: only the first machine word of each digest;
 * cmp_one() confirms matches with a full BINARY_SIZE compare.
 * Without OpenMP, MAX_KEYS_PER_CRYPT stays 1, so only index 0 exists and
 * the loop header is compiled out on purpose. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
		return 1;
	return 0;
}
/* Full 20-byte digest compare for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one already compared the whole binary. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void known_hosts_set_key(char *key, int index)
{
int len = strlen(key);
memcpy(saved_key[index], key, len);
saved_key[index][len] = 0;
}
/* Return the candidate stored at the given key index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring this plugin into the JtR core.  Binary hashes
 * are looked up via the default binary_hash_* on get_binary's output;
 * salts use the default salt hash over the raw custom_salt bytes. */
struct fmt_main fmt_known_hosts = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_OMP_BAD: per-candidate work is tiny, so OMP scaling is poor */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		known_hosts_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		known_hosts_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
c55c7aec73df0f31d67fbe39510946453b899e1d.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
/* Devito runtime array descriptor: a flat data pointer plus the size
 * metadata the generated code needs to reconstruct the N-d view. */
struct dataobj
{
  void *restrict data;   /* flat buffer, cast to a VLA pointer in Forward */
  int * size;            /* extent per dimension (including padding/halo) */
  int * npsize;          /* sizes without padding -- unused here */
  int * dsize;           /* domain sizes -- unused here */
  int * hsize;           /* halo sizes -- unused here */
  int * hofs;            /* halo offsets -- unused here */
  int * oofs;            /* owned-region offsets -- unused here */
} ;
/* Accumulated wall-clock seconds per generated code section. */
struct profiler
{
  double section0;  /* wave-equation stencil update */
  double section1;  /* source injection */
  double section2;  /* receiver interpolation */
} ;
/* Devito-generated forward modelling kernel (acoustic wave equation with
 * damping), offloaded to an accelerator via OpenMP target directives.
 * Per time step it: (section0) applies the finite-difference update to u,
 * (section1) injects the source wavelet at interpolated positions, and
 * (section2) samples u at the receiver positions into rec.
 * Returns 0; per-section timings are accumulated into *timers.
 * NOTE: machine-generated code -- do not hand-edit the stencil literals. */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
  /* Cast the flat buffers to multi-dimensional (VLA-typed) views */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
  /* Copy all arrays to the device once, before the time loop */
  #pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
  #pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
  #pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
  #pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
  #pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  /* t0/t1/t2 cycle through the 3 time slabs of u (previous/current/next) */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0 */
    /* High-order star stencil (offsets up to +-6 around the centre; the
     * constant 12 is the array padding offset), combined with the damping
     * term into the explicit time update of u[t1]. */
    #pragma omp target teams distribute parallel for collapse(3)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        for (int z = z_m; z <= z_M; z += 1)
        {
          float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
          u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1 */
    /* Inject each source sample into the 8 grid points surrounding its
     * physical position (weights are the interpolation coefficients);
     * atomics guard against two sources hitting the same point. */
    #pragma omp target teams distribute parallel for collapse(1)
    for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
    {
      int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
      int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
      int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
      int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
      int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
      int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
      {
        float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
      {
        float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
      }
      if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
      }
      if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
        #pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2 */
    /* Sample u at each receiver position: weighted sum over the 8
     * surrounding grid points (mirror of the injection weights above). */
    #pragma omp target teams distribute parallel for collapse(1)
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
      int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
      int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
      int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
      int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
      int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
      float sum = 0.0F;
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      rec[time][p_rec] = sum;
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
  }
  /* Copy the outputs back to the host and free all device mappings */
  #pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
  #pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
  #pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
  #pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
  #pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
  #pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  return 0;
}
/* Backdoor edit at Mon Mar 2 15:29:50 2020*/
|
dft_dft_solver.h | #ifndef _DFT_DFT_SOLVER_
#define _DFT_DFT_SOLVER_
#include <complex>
#include "spectral/spectral.h"
#include "blueprint.h"
#include "equations.h"
namespace spectral
{
/*! @brief Solver for periodic boundary conditions of the spectral equations.
* @ingroup solvers
*/
template< size_t n>
class DFT_DFT_Solver
{
  public:
    /// Real-space field type (real-to-complex FFT layout)
    typedef Matrix<double, TL_DFT> Matrix_Type;
    /*! @brief Construct a solver for periodic boundary conditions
     *
     * The constructor allocates storage for the solver
     * and initializes all fourier coefficients as well as
     * all low level solvers needed.
     * @param blueprint Contains all the necessary parameters.
     * @throw Message If your parameters are inconsistent.
     */
    DFT_DFT_Solver( const Parameters& blueprint);
    /*! @brief Prepare Solver for execution
     *
     * This function takes the fields and computes the missing
     * one according to the target parameter passed.
     * @param v Container with three non void matrices
     * @param t which Matrix is missing?
     */
    void init( std::array< Matrix<double,TL_DFT>, n>& v, enum target t);
    /**
     * @brief Perform first initializing step
     *
     */
    void first_step();
    /**
     * @brief Perform second initializing step
     *
     * After that the step function can be used
     */
    void second_step();
    /*! @brief Perform a step by the 3 step Karniadakis scheme
     *
     * @attention At least one call of first_step() and second_step() is necessary
     * */
    void step(const Matrix<double, TL_DFT>& src){ step_<TL_ORDER3>(src);}
    /*! @brief Get the result
        You get the solution matrix of the current timestep.
        @param t The field you want
        @return A Read only reference to the field
        @attention The reference is only valid until the next call to
            the step() function!
    */
    const Matrix<double, TL_DFT>& getField( enum target t) const;
    /*! @brief Get the result

        Use this function when you want to call step() without
        destroying the solution.
        @param m
            In exchange for the solution matrix you have to provide
            storage for further calculations. The field is swapped in.
        @param t
            The field you want.
        @attention The fields you get are not the ones of the current
            timestep. You get the fields that are not needed any more.
            This means the densities are 4 timesteps "old" whereas
            the potential is the one of the last timestep.
    */
    void getField( Matrix<double, TL_DFT>& m, enum target t);
    /// Read-only access to all density fields of the current timestep
    const std::array<Matrix<double, TL_DFT>, n>& getDensity( )const{return dens;}
    /// Read-only access to all potential fields of the current timestep
    const std::array<Matrix<double, TL_DFT>, n>& getPotential( )const{return phi;}
    /*! @brief Get the parameters of the solver.

        @return The parameters in use.
        @note You cannot change parameters once constructed.
     */
    const Parameters& blueprint() const { return blue;}
  private:
    typedef std::complex<double> complex;
    //methods
    void init_coefficients( const Parameters& p);
    void compute_cphi();//multiply cphi
    double dot( const Matrix_Type& m1, const Matrix_Type& m2);
    template< enum stepper S>
    void step_(const Matrix<double, TL_DFT>& src);
    //members
    const size_t rows, cols;        // real-space grid dimensions
    const size_t crows, ccols;      // spectral dimensions (ccols = cols/2+1, r2c)
    const Parameters blue;          // immutable copy of the construction parameters
    /////////////////fields//////////////////////////////////
    //GhostMatrix<double, TL_DFT> ghostdens, ghostphi;
    // per-species densities, potentials and nonlinearity (Arakawa) buffers
    std::array< Matrix<double, TL_DFT>, n> dens, phi, nonlinear;
    /////////////////Complex (void) Matrices for fourier transforms///////////
    std::array< Matrix< complex>, n> cdens, cphi;
    ///////////////////Solvers////////////////////////
    Arakawa arakawa;                          // Poisson-bracket discretization
    Karniadakis<n, complex, TL_DFT> karniadakis;  // 3rd-order time stepper
    DFT_DFT dft_dft;                          // 2d real<->complex FFT
    /////////////////////Coefficients//////////////////////
    Matrix< std::array< double, n> > phi_coeff;        // phi = sum_k coeff[k]*dens[k]
    std::array< Matrix< double>, n-1> gamma_coeff;     // gyro-average factors for cphi[1..n-1]
};
template< size_t n>
DFT_DFT_Solver<n>::DFT_DFT_Solver( const Parameters& bp):
    rows( bp.ny ), cols( bp.nx ),
    // r2c transform stores only the non-redundant half-spectrum in x
    crows( rows), ccols( cols/2+1),
    blue( bp),
    //fields
    dens( MatrixArray<double, TL_DFT,n>::construct( rows, cols)),
    phi( dens), nonlinear( dens),
    cdens( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)),
    cphi(cdens),
    //Solvers
    arakawa( bp.h),
    karniadakis(rows, cols, crows, ccols, bp.dt),
    // FFTW_MEASURE: plan creation runs timing experiments (slow ctor, fast steps)
    dft_dft( rows, cols, FFTW_MEASURE),
    //Coefficients
    phi_coeff( crows, ccols),
    gamma_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols))
{
    // may throw Message if the blueprint parameters are inconsistent
    bp.consistencyCheck();
    if( bp.global)
    {
        // global (full-F) equations are not supported by this solver
        std::cerr << "WARNING: GLOBAL solver not implemented yet! \n\
        Switch to local solver...\n";
    }
    init_coefficients( bp);
}
/* Precompute, for every Fourier mode (i,j): the Karniadakis equation
 * coefficient matrix, the Poisson coefficients phi_coeff and the
 * gyro-average factors gamma_coeff, all as functions of the mode's
 * Laplacian eigenvalue. */
template< size_t n>
void DFT_DFT_Solver<n>::init_coefficients( const Parameters& p)
{
    Matrix< QuadMat< complex, n> > coeff( crows, ccols);
    double laplace;
    int ik;
    const complex dymin( 0, 2.*M_PI/p.ly);   // smallest wavenumber in y (imaginary unit included)
    const double kxmin2 = 2.*2.*M_PI*M_PI/(double)(p.lx*p.lx),
                 kymin2 = 2.*2.*M_PI*M_PI/(double)(p.ly*p.ly);
    Equations e( p);
    Poisson poisson( p);
    // dft_dft is not transposing so i is the y index by default
    for( unsigned i = 0; i<crows; i++)
        for( unsigned j = 0; j<ccols; j++)
        {
            // map row index to a signed wavenumber (negative in upper half)
            ik = (i>rows/2) ? (i-rows) : i; //integer division rounded down
            laplace = - kxmin2*(double)(j*j) - kymin2*(double)(ik*ik);
            if( n == 2)
            {
                gamma_coeff[0](i,j) = poisson.gamma1_i( laplace);
            }
            else if( n == 3)
            {
                gamma_coeff[0](i,j) = poisson.gamma1_i( laplace);
                gamma_coeff[1](i,j) = poisson.gamma1_z( laplace);
            }
            // Nyquist mode: zero its y-derivative wavenumber
            if( rows%2 == 0 && i == rows/2) ik = 0;
            e( coeff( i,j), laplace, (double)ik*dymin);
            // the constant (0,0) mode has laplace == 0: skip Poisson there
            if( laplace == 0) continue;
            poisson( phi_coeff(i,j), laplace);
        }
    //for periodic bc the constant is undefined
    for( unsigned k=0; k<n; k++)
        phi_coeff(0,0)[k] = 0;
    karniadakis.init_coeff( coeff, (double)(rows*cols));
}
/* Initialize the solver from n given fields, computing the one field the
 * caller marked as missing via t.  The Poisson relation used throughout is
 *   cphi[0] = sum_k phi_coeff[k] * cdens[k]   (per Fourier mode),
 * which is solved for the missing species density or for the potential.
 *
 * BUGFIX: the species-elimination loops used the condition
 * (k<n && k!=target), which TERMINATES at k==target instead of skipping
 * it.  For ELECTRONS (target 0) the loop never executed at all, for IONS
 * (target 1) and POTENTIAL the species k>=2 were silently dropped when
 * n==3.  The loops now iterate over all k and skip only the target.
 *
 * @param v container with n matrices; the one selected by t may hold the
 *          field to be computed (its input content is ignored)
 * @param t which field is missing (ELECTRONS/IONS/IMPURITIES/POTENTIAL)
 * @throw Message on a void input matrix (TL_DEBUG) or for t == ALL
 */
template< size_t n>
void DFT_DFT_Solver<n>::init( std::array< Matrix<double, TL_DFT>,n>& v, enum target t)
{
    // Fourier transform the input fields into cdens
    for( unsigned k=0; k<n; k++)
    {
#ifdef TL_DEBUG
        if( v[k].isVoid())
            throw Message("You gave me a void Matrix!!", _ping_);
#endif
        dft_dft.r2c( v[k], cdens[k]);
    }
    // FFTW transforms are unnormalized: divide by the number of points
    for( unsigned k=0; k<n; k++)
        for( unsigned i=0; i<crows; i++)
            for( unsigned j=0; j<ccols;j++)
                cdens[k](i,j) /= (double)(rows*cols);
    switch( t) //which field must be computed?
    {
        case( ELECTRONS):
            // v held (phi, n_1, ..., n_{n-1}); rotate so cphi[0] holds phi
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>0; k--)
                swap_fields( cdens[k], cdens[k-1]);
            // solve cphi[0] = sum_k a_k cdens[k] for cdens[0]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[0](i,j) = cphi[0](i,j)/phi_coeff(i,j)[0];
                    for( unsigned k=1; k<n; k++)
                        cdens[0](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[0];
                }
            break;
        case( IONS):
            // v held (n_0, phi, n_2, ...); rotate phi out of slot n-1
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>1; k--)
                swap_fields( cdens[k], cdens[k-1]);
            // solve for cdens[1]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[1](i,j) = cphi[0](i,j) /phi_coeff(i,j)[1];
                    for( unsigned k=0; k<n; k++)
                    {
                        if( k == 1) continue;
                        cdens[1](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[1];
                    }
                }
            break;
        case( IMPURITIES):
            // v held (n_0, n_1, phi); rotate phi out of slot n-1
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>2; k--) //i.e. never for n = 3
                swap_fields( cdens[k], cdens[k-1]);
            // solve for cdens[2]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[2](i,j) = cphi[0](i,j) /phi_coeff(i,j)[2];
                    for( unsigned k=0; k<n; k++)
                    {
                        if( k == 2) continue;
                        cdens[2](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[2];
                    }
                }
            break;
        case( POTENTIAL):
            // all densities given: evaluate the Poisson sum directly
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cphi[0](i,j) = 0;
                    for( unsigned k=0; k<n; k++)
                        cphi[0](i,j) += cdens[k](i,j)*phi_coeff(i,j)[k];
                }
            break;
        case( ALL):
            throw Message( "spectral::ALL not treated yet!", _ping_);
    }
    // derive the gyro-averaged potentials from cphi[0]
    for( unsigned k=0; k<n-1; k++)
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[k+1](i,j) = gamma_coeff[k](i,j)*cphi[0](i,j);
    // back-transform to x-space
    for( unsigned k=0; k<n; k++)
    {
        // the (0,0) (constant) mode is undefined for periodic bc: zero it
        cdens[k](0,0) = 0;
        cphi[k](0,0) = 0;
        dft_dft.c2r( cdens[k], dens[k]);
        dft_dft.c2r( cphi[k], phi[k]);
    }
    //now the density and the potential is given in x-space
}
/* Swap out a no-longer-needed field in exchange for caller storage (see
 * the class doc: these are NOT the current-timestep fields). */
template< size_t n>
void DFT_DFT_Solver<n>::getField( Matrix<double, TL_DFT>& m, enum target t)
{
#ifdef TL_DEBUG
    if(m.isVoid())
        throw Message( "You may not swap in a void Matrix!\n", _ping_);
#endif
    switch( t)
    {
        // densities: swap with the (already consumed) nonlinearity buffers
        case( ELECTRONS): swap_fields( m, nonlinear[0]); break;
        case( IONS): swap_fields( m, nonlinear[1]); break;
        case( IMPURITIES): swap_fields( m, nonlinear[2]); break;
        // NOTE(review): cphi[0] is a complex matrix while m is real --
        // looks like this should swap a real buffer (phi?); verify that
        // swap_fields is intended to mix these types.
        case( POTENTIAL): swap_fields( m, cphi[0]); break;
        case( ALL): throw Message( "spectral::ALL not allowed here", _ping_);
    }
}
/* Read-only access to a current-timestep field; the reference is only
 * valid until the next step() call. */
template< size_t n>
const Matrix<double, TL_DFT>& DFT_DFT_Solver<n>::getField( enum target t) const
{
    Matrix<double, TL_DFT> const * m = 0;
    switch( t)
    {
        case( ELECTRONS): m = &dens[0]; break;
        case( IONS): m = &dens[1]; break;
        // NOTE(review): dens[2] is only valid for n == 3; calling with
        // IMPURITIES on a 2-species solver indexes out of bounds.
        case( IMPURITIES): m = &dens[2]; break;
        case( POTENTIAL): m = &phi[0]; break;
        case( ALL): throw Message( "spectral::ALL not allowed here", _ping_);
    }
    return *m;
}
/* First startup step of the multistep scheme: a single Euler step with a
 * void (ignored) source matrix. */
template< size_t n>
void DFT_DFT_Solver<n>::first_step()
{
    karniadakis.template invert_coeff<TL_EULER>( );
    Matrix<double, TL_DFT> voidmatrix( rows, cols,(bool)TL_VOID);
    step_<TL_EULER>(voidmatrix);
}
/* Second startup step: one 2nd-order step, then pre-invert the 3rd-order
 * coefficients so subsequent step() calls can run directly. */
template< size_t n>
void DFT_DFT_Solver<n>::second_step()
{
    Matrix<double, TL_DFT> voidmatrix( rows, cols, (bool)TL_VOID);
    karniadakis.template invert_coeff<TL_ORDER2>();
    step_<TL_ORDER2>(voidmatrix);
    karniadakis.template invert_coeff<TL_ORDER3>();
}
/* Evaluate the potential in Fourier space: cphi[0] is the phi_coeff-weighted
 * sum of the densities, cphi[1..n-1] are the gamma_coeff multiples of
 * cphi[0].  The n==2 / n==3 branches are unrolled by species count. */
template< size_t n>
void DFT_DFT_Solver<n>::compute_cphi()
{
    if( n==2)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j);
        }
        //#pragma omp barrier
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
        }
        //#pragma omp barrier
    }
    else if( n==3)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j)
                             + phi_coeff(i,j)[2]*cdens[2](i,j);
        }
        //#pragma omp barrier
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
            {
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
                cphi[2](i,j) = gamma_coeff[1](i,j)*cphi[0](i,j);
            }
        }
        //#pragma omp barrier
    }
}
/* One time step of stepper order S:
 *  1. Arakawa bracket {dens_k, phi_k} -> nonlinear[k] (per species, in
 *     parallel; each thread works on its own ghost matrices)
 *  2. explicit Karniadakis combination in x-space
 *  3. transform to Fourier space, implicit Karniadakis solve, potential
 *     update, transform back.
 * @param src optional source added to the electron nonlinearity; pass a
 *        void matrix to skip it. */
template< size_t n>
template< enum stepper S>
void DFT_DFT_Solver<n>::step_(const Matrix<double, TL_DFT>& src)
{
    //1.0 Compute nonlinearity
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        // thread-local ghost matrices; dens/phi are swapped in and back out
        GhostMatrix<double, TL_DFT> ghostdens{ rows, cols, TL_PERIODIC, TL_PERIODIC};
        GhostMatrix<double, TL_DFT> ghostphi{ rows, cols, TL_PERIODIC, TL_PERIODIC};
        swap_fields( dens[k], ghostdens); //now dens[k] is void
        swap_fields( phi[k], ghostphi); //now phi[k] is void
        ghostdens.initGhostCells( );
        ghostphi.initGhostCells( );
        arakawa( ghostdens, ghostphi, nonlinear[k]);
        swap_fields( dens[k], ghostdens); //now ghostdens is void
        swap_fields( phi[k], ghostphi); //now ghostphi is void
    }
    //1.1. Add source term (only to species 0)
    if( !src.isVoid())
        for( unsigned i=0; i<rows; i++)
            for( unsigned j=0; j<cols; j++)
                nonlinear[0](i,j) += src(i,j);
    //2. perform karniadakis step
    karniadakis.template step_i<S>( dens, nonlinear);
    //3. solve linear equation
    //3.1. transform v_hut
    // NOTE(review): concurrent r2c/c2r calls share the dft_dft object but
    // operate on distinct arrays -- presumably its execute methods are
    // thread-safe per-field; verify against the spectral library.
#pragma omp parallel for
    for( unsigned k=0; k<n; k++){
        dft_dft.r2c( dens[k], cdens[k]);}
    //3.2. perform karniadaksi step and multiply coefficients for phi
    karniadakis.step_ii( cdens);
    compute_cphi();
    //3.3. backtransform
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        dft_dft.c2r( cdens[k], dens[k]);
        dft_dft.c2r( cphi[k], phi[k]);
    }
}
} //namespace spectral
#endif //_DFT_DFT_SOLVER_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.