source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
deconvolution_pack8to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) kernel, AVX path.
// Input feature map is packed 8 floats per element (pack8); output is packed
// 4 floats per element (pack4). For each output pixel the kernel gathers the
// input pixels that contribute to it by inverting the forward-convolution
// index mapping, so no scatter/atomic writes are needed and each output
// element is produced exactly once.
static void deconvolution_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// effective kernel footprint once dilation is applied
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
// bias_data may be empty; Mat converts to a null pointer in that case
const float* bias_data_ptr = bias_data;
// num_output
// one thread per output channel
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// accumulator holds the 4 output floats of this pack4 element
__m128 _sum = _mm_setzero_ps();
if (bias_data_ptr)
{
_sum = _mm_loadu_ps(bias_data_ptr + p * 4);
}
// weights for output channel p, laid out as maxk blocks of
// 8(in-lanes) x 4(out-lanes) = 32 floats per kernel tap
const float* kptr = weight_data_packed.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
// invert the forward mapping: find the input row sy (if any)
// whose stride-h expansion hits output row i through tap y
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
// same inversion along the width
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
// 8 input lanes of the contributing pack8 element
const float* sptr = m.row(sy) + sx * 8;
// offset of this tap's 32-float weight block
int k = (y * kernel_w + x) * 32;
__m128 _val0 = _mm_broadcast_ss(sptr);
__m128 _val1 = _mm_broadcast_ss(sptr + 1);
__m128 _val2 = _mm_broadcast_ss(sptr + 2);
__m128 _val3 = _mm_broadcast_ss(sptr + 3);
__m128 _val4 = _mm_broadcast_ss(sptr + 4);
__m128 _val5 = _mm_broadcast_ss(sptr + 5);
__m128 _val6 = _mm_broadcast_ss(sptr + 6);
__m128 _val7 = _mm_broadcast_ss(sptr + 7);
__m128 _w0 = _mm_load_ps(kptr + k);
__m128 _w1 = _mm_load_ps(kptr + k + 4);
__m128 _w2 = _mm_load_ps(kptr + k + 8);
__m128 _w3 = _mm_load_ps(kptr + k + 12);
__m128 _w4 = _mm_load_ps(kptr + k + 16);
__m128 _w5 = _mm_load_ps(kptr + k + 20);
__m128 _w6 = _mm_load_ps(kptr + k + 24);
__m128 _w7 = _mm_load_ps(kptr + k + 28);
// accumulate: each input lane broadcast-multiplied into the 4 output lanes
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
_sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
_sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
_sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
_sum = _mm_comp_fmadd_ps(_val4, _w4, _sum);
_sum = _mm_comp_fmadd_ps(_val5, _w5, _sum);
_sum = _mm_comp_fmadd_ps(_val6, _w6, _sum);
_sum = _mm_comp_fmadd_ps(_val7, _w7, _sum);
}
}
// advance to next input channel's weight blocks
kptr += maxk * 32;
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
|
clean.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef __VCGLIB_CLEAN
#define __VCGLIB_CLEAN
// VCG headers
#include <vcg/complex/complex.h>
#include <vcg/simplex/face/pos.h>
#include <vcg/simplex/face/topology.h>
#include <vcg/simplex/edge/topology.h>
#include <vcg/complex/algorithms/closest.h>
#include <vcg/space/index/grid_static_ptr.h>
#include <vcg/space/index/spatial_hashing.h>
#include <vcg/complex/algorithms/update/selection.h>
#include <vcg/complex/algorithms/update/flag.h>
#include <vcg/complex/algorithms/update/normal.h>
#include <vcg/complex/algorithms/update/topology.h>
#include <vcg/space/triangle3.h>
namespace vcg {
namespace tri{
/// Iterator over one connected component of a mesh: start() seeds a
/// depth-first visit from a face, operator++ expands across FF adjacency,
/// operator* yields the current face, completed() tells when the component
/// has been fully visited. Uses the per-face mark of the mesh to avoid
/// revisiting faces.
template <class ConnectedMeshType>
class ConnectedComponentIterator
{
public:
typedef ConnectedMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
public:
// Pop the current face and push its unvisited FF neighbours.
void operator ++()
{
FacePointer cur = visitStack.top();
visitStack.pop();
for(int e=0;e<3;++e)
{
if(face::IsBorder(*cur,e)) continue;
FacePointer nb = cur->FFp(e);
if(tri::IsMarked(*meshPtr,nb)) continue;
tri::Mark(*meshPtr,nb);
visitStack.push(nb);
}
}
// Begin the visit of the component containing face p.
void start(MeshType &m, FacePointer p)
{
tri::RequirePerFaceMark(m);
meshPtr=&m;
while(!visitStack.empty()) visitStack.pop();
UnMarkAll(m);
assert(p);
assert(!p->IsD());
tri::Mark(m,p);
visitStack.push(p);
}
// True once every face of the component has been consumed.
bool completed() {
return visitStack.empty();
}
FacePointer operator *()
{
return visitStack.top();
}
private:
std::stack<FacePointer> visitStack;
MeshType *meshPtr;
};
///
/** \addtogroup trimesh */
/*@{*/
/// Class of static functions to clean//restore meshs.
template <class CleanMeshType>
class Clean
{
public:
typedef CleanMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ConstVertexIterator ConstVertexIterator;
typedef typename MeshType::EdgeIterator EdgeIterator;
typedef typename MeshType::EdgePointer EdgePointer;
typedef typename MeshType::CoordType CoordType;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
typedef typename vcg::Box3<ScalarType> Box3Type;
typedef GridStaticPtr<FaceType, ScalarType > TriMeshGrid;
/* classe di confronto per l'algoritmo di eliminazione vertici duplicati*/
/* Comparison functor for duplicated-vertex removal: orders vertex pointers
   by their spatial position, breaking exact-position ties by pointer value
   so the ordering is a strict weak order and equal-position runs are stable. */
class RemoveDuplicateVert_Compare{
public:
inline bool operator()(VertexPointer const &a, VertexPointer const &b)
{
if((*a).cP() == (*b).cP()) return a < b;
return (*a).cP() < (*b).cP();
}
};
/** This function removes all duplicate vertices of the mesh by looking only at their spatial positions.
* Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
* For this reason this function is usually performed BEFORE building any topology information.
* @param m the mesh to clean
* @param RemoveDegenerateFlag if true, faces/edges that become degenerate after the merge are also removed
* @return the number of deleted vertices
*/
static int RemoveDuplicateVertex( MeshType & m, bool RemoveDegenerateFlag=true) // V1.0
{
if(m.vert.size()==0 || m.vn==0) return 0;
// maps each duplicate vertex onto the surviving representative of its position
std::map<VertexPointer, VertexPointer> mp;
size_t i,j;
VertexIterator vi;
int deleted=0;
int k=0;
size_t num_vert = m.vert.size();
// sort pointers to all vertices (deleted ones included) by position, so
// that duplicates end up adjacent
std::vector<VertexPointer> perm(num_vert);
for(vi=m.vert.begin(); vi!=m.vert.end(); ++vi, ++k)
perm[k] = &(*vi);
RemoveDuplicateVert_Compare c_obj;
std::sort(perm.begin(),perm.end(),c_obj);
// j is the index of the current representative, i scans forward;
// the first vertex maps to itself (harmless self-entry)
j = 0;
i = j;
mp[perm[i]] = perm[j];
++i;
for(;i!=num_vert;)
{
if( (! (*perm[i]).IsD()) &&
(! (*perm[j]).IsD()) &&
(*perm[i]).P() == (*perm[j]).cP() )
{
// same position as the representative: record the remap and delete
VertexPointer t = perm[i];
mp[perm[i]] = perm[j];
++i;
Allocator<MeshType>::DeleteVertex(m,*t);
deleted++;
}
else
{
// new position: perm[i] becomes the new representative
j = i;
++i;
}
}
// redirect face vertex references onto surviving representatives
for(FaceIterator fi = m.face.begin(); fi!=m.face.end(); ++fi)
if( !(*fi).IsD() )
for(k = 0; k < (*fi).VN(); ++k)
if( mp.find( (typename MeshType::VertexPointer)(*fi).V(k) ) != mp.end() )
{
(*fi).V(k) = &*mp[ (*fi).V(k) ];
}
// same for edge endpoints
for(EdgeIterator ei = m.edge.begin(); ei!=m.edge.end(); ++ei)
if( !(*ei).IsD() )
for(k = 0; k < 2; ++k)
if( mp.find( (typename MeshType::VertexPointer)(*ei).V(k) ) != mp.end() )
{
(*ei).V(k) = &*mp[ (*ei).V(k) ];
}
// merging vertices can collapse faces/edges onto themselves
if(RemoveDegenerateFlag) RemoveDegenerateFace(m);
if(RemoveDegenerateFlag && m.en>0) {
RemoveDegenerateEdge(m);
RemoveDuplicateEdge(m);
}
return deleted;
}
/// An unordered pair of vertex indices plus the edge it came from.
/// The two indices are stored sorted so that (a,b) and (b,a) compare equal;
/// used to detect duplicated edges after sorting a vector of SortedPair.
class SortedPair
{
public:
SortedPair() {}
SortedPair(unsigned int v0, unsigned int v1, EdgePointer _fp)
{
v[0]=v0;
v[1]=v1;
fp=_fp;
// canonical order: smaller index first
if(v[0]>v[1]) std::swap(v[0],v[1]);
}
// lexicographic order on (v[1], v[0])
bool operator < (const SortedPair &p) const
{
if(v[1]!=p.v[1]) return v[1]<p.v[1];
return v[0]<p.v[0];
}
bool operator == (const SortedPair &s) const
{
return (v[0]==s.v[0]) && (v[1]==s.v[1]);
}
unsigned int v[2];
EdgePointer fp;
};
/// An unordered triple of vertex indices plus the face it came from.
/// Indices are stored sorted so any permutation of the same three vertices
/// compares equal; used to detect duplicated faces after sorting.
class SortedTriple
{
public:
SortedTriple() {}
SortedTriple(unsigned int v0, unsigned int v1, unsigned int v2,FacePointer _fp)
{
v[0]=v0;
v[1]=v1;
v[2]=v2;
fp=_fp;
std::sort(v,v+3);
}
// lexicographic order on (v[2], v[1], v[0])
bool operator < (const SortedTriple &p) const
{
if(v[2]!=p.v[2]) return v[2]<p.v[2];
if(v[1]!=p.v[1]) return v[1]<p.v[1];
return v[0]<p.v[0];
}
bool operator == (const SortedTriple &s) const
{
return (v[0]==s.v[0]) && (v[1]==s.v[1]) && (v[2]==s.v[2]);
}
unsigned int v[3];
FacePointer fp;
};
/** Remove all duplicate faces, comparing faces only by their three vertex
references (so it should be called after vertex unification).
It does not update any topology relation (VT/TT) that the deletions could
affect; for this reason it is usually performed BEFORE building topology.
@return the number of deleted faces. */
static int RemoveDuplicateFace( MeshType & m) // V1.0
{
// collect a canonically-sorted vertex-index triple for every live face
std::vector<SortedTriple> triples;
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
{
if((*fi).IsD()) continue;
triples.push_back(SortedTriple( tri::Index(m,(*fi).V(0)),
tri::Index(m,(*fi).V(1)),
tri::Index(m,(*fi).V(2)),
&*fi));
}
assert (size_t(m.fn) == triples.size());
// sorting brings duplicated faces next to each other
std::sort(triples.begin(),triples.end());
int removed=0;
for(size_t i=0;i+1<triples.size();++i)
{
if(triples[i]==triples[i+1])
{
// delete the earlier of each adjacent equal pair; in a run of k
// equal faces the last one survives
++removed;
tri::Allocator<MeshType>::DeleteFace(m, *(triples[i].fp) );
}
}
return removed;
}
/** Remove all duplicate edges, comparing edges only by their two vertex
references (so it should be called after vertex unification).
It does not update any topology relation that the deletions could affect;
for this reason it is usually performed BEFORE building topology.
@return the number of deleted edges. */
static int RemoveDuplicateEdge( MeshType & m) // V1.0
{
if (m.en==0) return 0;
// collect a canonically-ordered vertex-index pair for every live edge
std::vector<SortedPair> pairs;
for(EdgeIterator ei=m.edge.begin();ei!=m.edge.end();++ei)
{
if((*ei).IsD()) continue;
pairs.push_back(SortedPair( tri::Index(m,(*ei).V(0)), tri::Index(m,(*ei).V(1)), &*ei));
}
assert (size_t(m.en) == pairs.size());
// sorting brings duplicated edges next to each other
std::sort(pairs.begin(),pairs.end());
int removed=0;
for(size_t i=0;i+1<pairs.size();++i)
{
if(pairs[i]==pairs[i+1])
{
// delete the earlier of each adjacent equal pair
++removed;
tri::Allocator<MeshType>::DeleteEdge(m, *(pairs[i].fp) );
}
}
return removed;
}
/// Count the vertices not referenced by any face or edge, without deleting them.
/// Thin wrapper over RemoveUnreferencedVertex with deletion disabled.
static int CountUnreferencedVertex( MeshType& m)
{
return RemoveUnreferencedVertex(m,false);
}
/** This function removes (or, if DeleteVertexFlag is false, merely counts) the
vertices that are not referenced by any face or edge. Updates the vn counter.
@param m The mesh
@param DeleteVertexFlag if false, unreferenced vertices are only counted
@return The number of removed (or counted) vertices
*/
static int RemoveUnreferencedVertex( MeshType& m, bool DeleteVertexFlag=true) // V1.0
{
FaceIterator fi;
EdgeIterator ei;
VertexIterator vi;
// temporary per-vertex user bit: set on every referenced vertex
int referredBit = VertexType::NewBitFlag();
int j;
int deleted = 0;
for(vi=m.vert.begin();vi!=m.vert.end();++vi)
(*vi).ClearUserBit(referredBit);
// mark vertices referenced by live faces...
for(fi=m.face.begin();fi!=m.face.end();++fi)
if( !(*fi).IsD() )
for(j=0;j<(*fi).VN();++j)
(*fi).V(j)->SetUserBit(referredBit);
// ...and by live edges
for(ei=m.edge.begin();ei!=m.edge.end();++ei)
if( !(*ei).IsD() ){
(*ei).V(0)->SetUserBit(referredBit);
(*ei).V(1)->SetUserBit(referredBit);
}
// anything live and unmarked is unreferenced
for(vi=m.vert.begin();vi!=m.vert.end();++vi)
if( (!(*vi).IsD()) && (!(*vi).IsUserBit(referredBit)))
{
if(DeleteVertexFlag) Allocator<MeshType>::DeleteVertex(m,*vi);
++deleted;
}
// always release the borrowed user bit
VertexType::DeleteBitFlag(referredBit);
return deleted;
}
/**
Remove degenerate vertices, i.e. vertices whose coordinates contain invalid
floating point values (NaN). All the faces incident on deleted vertices are
also deleted.
@return the number of deleted vertices (face deletions are not reported)
*/
static int RemoveDegenerateVertex(MeshType& m)
{
VertexIterator vi;
int count_vd = 0;
// first pass: delete every vertex with a NaN coordinate
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(math::IsNAN( (*vi).P()[0]) ||
math::IsNAN( (*vi).P()[1]) ||
math::IsNAN( (*vi).P()[2]) )
{
count_vd++;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
// second pass: delete faces now referencing a deleted vertex
FaceIterator fi;
int count_fd = 0;
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD())
if( (*fi).V(0)->IsD() ||
(*fi).V(1)->IsD() ||
(*fi).V(2)->IsD() )
{
count_fd++;
Allocator<MeshType>::DeleteFace(m,*fi);
}
return count_vd;
}
/**
Remove topologically degenerate faces: faces where two or more vertex
references point to the same vertex (not merely two distinct vertices with
the same coordinates). All degenerate faces have zero area, BUT not all
zero-area faces are degenerate. Topology is deliberately not touched here,
because topology-building functions crash in presence of degenerate faces.
@return the number of deleted faces
*/
static int RemoveDegenerateFace(MeshType& m)
{
int removed = 0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi)
{
if((*fi).IsD()) continue;
const bool degenerate = (*fi).V(0) == (*fi).V(1)
|| (*fi).V(0) == (*fi).V(2)
|| (*fi).V(1) == (*fi).V(2);
if(degenerate)
{
++removed;
Allocator<MeshType>::DeleteFace(m,*fi);
}
}
return removed;
}
/// Remove degenerate edges, i.e. edges whose two endpoints reference the
/// same vertex. Returns the number of deleted edges.
static int RemoveDegenerateEdge(MeshType& m)
{
int removed = 0;
for(EdgeIterator ei=m.edge.begin(); ei!=m.edge.end();++ei)
{
if((*ei).IsD()) continue;
if((*ei).V(0) == (*ei).V(1))
{
++removed;
Allocator<MeshType>::DeleteEdge(m,*ei);
}
}
return removed;
}
/// Remove non-manifold vertices together with all their incident faces.
/// Selects non-manifold vertices (via CountNonManifoldVertexFF), grows the
/// selection to the incident faces, then deletes both.
/// Requires FF adjacency (through CountNonManifoldVertexFF).
/// @return the number of removed vertices (face deletions are not reported)
static int RemoveNonManifoldVertex(MeshType& m)
{
CountNonManifoldVertexFF(m,true);
// select every face touching a selected (non-manifold) vertex
tri::UpdateSelection<MeshType>::FaceFromVertexLoose(m);
int count_removed = 0;
FaceIterator fi;
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD() && (*fi).IsS())
Allocator<MeshType>::DeleteFace(m,*fi);
VertexIterator vi;
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(!(*vi).IsD() && (*vi).IsS()) {
++count_removed;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
return count_removed;
}
/// Split the selected vertices of an edge mesh: every selected vertex shared
/// by k edge-endpoints is duplicated so each extra occurrence gets its own
/// copy (placed at the same position). The first occurrence keeps the
/// original vertex.
/// Bug fix: the visited test was inverted (`!vp->IsV()`). Since VertexClearV
/// runs first, IsV() was always false, so EVERY occurrence was replaced by a
/// new vertex (orphaning the original) and the SetV() branch was dead code.
/// @return the number of vertices added
static int SplitSelectedVertexOnEdgeMesh(MeshType& m)
{
tri::RequireCompactness(m);
tri::UpdateFlags<MeshType>::VertexClearV(m);
int count_split = 0;
for(size_t i=0;i<m.edge.size();++i)
{
for(int j=0;j<2;++j)
{
VertexPointer vp = m.edge[i].V(j);
if(vp->IsS())
{
if(vp->IsV())
{
// already seen once: this edge gets its own duplicate vertex
m.edge[i].V(j) = &*(tri::Allocator<MeshType>::AddVertex(m,vp->P()));
++count_split;
}
else
{
// first encounter: keep the original vertex, mark it visited
vp->SetV();
}
}
}
}
return count_split;
}
/// Select the non-manifold vertices of an edge mesh, i.e. the vertices
/// incident on more than two edges. Requires a compact mesh.
static void SelectNonManifoldVertexOnEdgeMesh(MeshType &m)
{
tri::RequireCompactness(m);
tri::UpdateSelection<MeshType>::VertexClear(m);
// per-vertex incident-edge counter, indexed by vertex index
std::vector<int> degree(m.vn,0);
for(size_t e=0;e<m.edge.size();++e)
{
degree[tri::Index(m,m.edge[e].V(0))]++;
degree[tri::Index(m,m.edge[e].V(1))]++;
}
for(size_t vIdx=0;vIdx<m.vert.size();++vIdx)
{
if(degree[vIdx]>2) m.vert[vIdx].SetS();
}
}
/// Select the "crease" vertices of an edge mesh: vertices with exactly two
/// incident edges whose deviation from a straight line exceeds AngleRadThr
/// (radians). Requires compactness and VE adjacency; rebuilds VE topology.
/// NOTE(review): the existing selection is not cleared here — selected
/// vertices accumulate on top of any prior selection; confirm if intended.
static void SelectCreaseVertexOnEdgeMesh(MeshType &m, ScalarType AngleRadThr)
{
tri::RequireCompactness(m);
tri::RequireVEAdjacency(m);
tri::UpdateTopology<MeshType>::VertexEdge(m);
for(size_t i=0;i<m.vert.size();++i)
{
// star of vertices adjacent through edges
std::vector<VertexPointer> VVStarVec;
edge::VVStarVE(&(m.vert[i]),VVStarVec);
if(VVStarVec.size()==2)
{
CoordType v0 = m.vert[i].P() - VVStarVec[0]->P();
CoordType v1 = m.vert[i].P() - VVStarVec[1]->P();
// deviation from straightness: PI minus the angle between the two arms
float angle = M_PI-vcg::Angle(v0,v1);
if(angle > AngleRadThr) m.vert[i].SetS();
}
}
}
/// Split non-manifold vertices by duplication.
/// Given a mesh with FF adjacency, it searches for non-manifold vertices and
/// duplicates them: each fan of faces around a non-manifold vertex gets its
/// own copy. Duplicated vertices are moved apart according to moveThreshold,
/// a fraction of the average vector from the non-manifold vertex to the
/// barycenters of its incident faces.
/// @return the number of vertices added
static int SplitNonManifoldVertex(MeshType& m, ScalarType moveThreshold)
{
RequireFFAdjacency(m);
typedef std::pair<FacePointer,int> FaceInt; // a face and the index of the vertex that we have to change
// for each vertex to split: the vertex and the (face, vertex-slot) pairs to rewire
std::vector<std::pair<VertexPointer, std::vector<FaceInt> > >ToSplitVec;
// preserve the caller's selection across CountNonManifoldVertexFF
SelectionStack<MeshType> ss(m);
ss.push();
CountNonManifoldVertexFF(m,true);
UpdateFlags<MeshType>::VertexClearV(m);
// First step: for each selected (non-manifold) vertex, walk one fan of
// faces around it with a Pos and record which face slots belong to it.
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;i++)
if((*fi).V(i)->IsS() && !(*fi).V(i)->IsV())
{
(*fi).V(i)->SetV();
face::Pos<FaceType> startPos(&*fi,i);
face::Pos<FaceType> curPos = startPos;
std::set<FaceInt> faceSet;
do
{
faceSet.insert(std::make_pair(curPos.F(),curPos.VInd()));
curPos.NextE();
} while (curPos != startPos);
ToSplitVec.push_back(make_pair((*fi).V(i),std::vector<FaceInt>()));
typename std::set<FaceInt>::const_iterator iii;
for(iii=faceSet.begin();iii!=faceSet.end();++iii)
ToSplitVec.back().second.push_back(*iii);
}
}
ss.pop();
// Second step actually add new vertices and split them.
// PointerUpdater keeps our stored VertexPointers valid if AddVertices reallocates.
typename tri::Allocator<MeshType>::template PointerUpdater<VertexPointer> pu;
VertexIterator firstVp = tri::Allocator<MeshType>::AddVertices(m,ToSplitVec.size(),pu);
for(size_t i =0;i<ToSplitVec.size();++i)
{
VertexPointer np=ToSplitVec[i].first;
pu.Update(np);
firstVp->ImportData(*np);
// loop on the face to be changed, and also compute the movement vector;
CoordType delta(0,0,0);
for(size_t j=0;j<ToSplitVec[i].second.size();++j)
{
FaceInt ff=ToSplitVec[i].second[j];
ff.first->V(ff.second)=&*firstVp;
delta+=Barycenter(*(ff.first))-np->cP();
}
// move the copy toward the average incident-face barycenter
delta /= ToSplitVec[i].second.size();
firstVp->P() = firstVp->P() + delta * moveThreshold;
firstVp++;
}
return ToSplitVec.size();
}
// Auxiliary comparator ordering face pointers by ascending (double) area.
// Used in RemoveNonManifoldFace so the smallest faces are removed first.
struct CompareAreaFP {
bool operator ()(FacePointer const& f1, FacePointer const& f2) const {
return DoubleArea(*f1) < DoubleArea(*f2);
}
};
/// Removal of faces that are incident on a non manifold edge.
/// Candidate faces are removed smallest-area first; before each deletion the
/// non-manifold condition is re-checked, since earlier deletions may have
/// already made the remaining edges manifold.
/// @return the number of deleted faces
static int RemoveNonManifoldFace(MeshType& m)
{
FaceIterator fi;
int count_fd = 0;
std::vector<FacePointer> ToDelVec;
// collect every face with at least one non-manifold edge
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if (!fi->IsD())
{
if ((!IsManifold(*fi,0))||
(!IsManifold(*fi,1))||
(!IsManifold(*fi,2)))
ToDelVec.push_back(&*fi);
}
// smallest faces first: prefer deleting slivers over large faces
std::sort(ToDelVec.begin(),ToDelVec.end(),CompareAreaFP());
for(size_t i=0;i<ToDelVec.size();++i)
{
if(!ToDelVec[i]->IsD())
{
FaceType &ff= *ToDelVec[i];
// re-check: a previous deletion may have fixed this edge already
if ((!IsManifold(ff,0))||
(!IsManifold(ff,1))||
(!IsManifold(ff,2)))
{
// detach from FF topology before deleting to keep adjacency valid
for(int j=0;j<3;++j)
if(!face::IsBorder<FaceType>(ff,j))
vcg::face::FFDetach<FaceType>(ff,j);
Allocator<MeshType>::DeleteFace(m,ff);
count_fd++;
}
}
}
return count_fd;
}
/*
The following functions remove faces that are geometrically "bad" according to edges and area criteria.
They remove the faces that are out of a given range of area or edges (e.g. faces too large or too small, or with edges too short or too long)
but that could be topologically correct.
These functions can optionally take into account only the selected faces.
*/
// Remove faces whose area is <= MinAreaThr or >= MaxAreaThr.
// If the Selected template flag is true, only selected faces are considered.
// Returns the number of deleted faces.
template<bool Selected>
static int RemoveFaceOutOfRangeAreaSel(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)())
{
FaceIterator fi;
int count_fd = 0;
// DoubleArea returns twice the area, so double the thresholds once here
MinAreaThr*=2;
MaxAreaThr*=2;
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD())
if(!Selected || (*fi).IsS())
{
const ScalarType doubleArea=DoubleArea<FaceType>(*fi);
if((doubleArea<=MinAreaThr) || (doubleArea>=MaxAreaThr) )
{
Allocator<MeshType>::DeleteFace(m,*fi);
count_fd++;
}
}
return count_fd;
}
// Alias for the old-style API, kept for backward compatibility: with the
// default thresholds of RemoveFaceOutOfRangeArea only zero-area faces go.
static int RemoveZeroAreaFace(MeshType& m) { return RemoveFaceOutOfRangeArea(m);}
// Alias that ignores the selection: forwards to the Selected=false
// instantiation of RemoveFaceOutOfRangeAreaSel.
static int RemoveFaceOutOfRangeArea(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)())
{
return RemoveFaceOutOfRangeAreaSel<false>(m,MinAreaThr,MaxAreaThr);
}
/**
 * Is the mesh only composed by quadrilaterals?
 * True iff every live face carries exactly one faux-edge bit
 * (a bit-quad is a pair of triangles sharing one faux edge).
 */
static bool IsBitQuadOnly(const MeshType &m)
{
typedef typename MeshType::FaceType F;
tri::RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (fi->IsD()) continue;
unsigned int fauxBits = fi->Flags()&(F::FAUX0|F::FAUX1|F::FAUX2);
const bool singleFaux = (fauxBits == F::FAUX0) || (fauxBits == F::FAUX1) || (fauxBits == F::FAUX2);
if (!singleFaux) return false;
}
return true;
}
/// Check that every faux-edge flag is mirrored by the face on the other
/// side of the edge (through FF adjacency). Requires per-face flags and
/// FF adjacency.
static bool IsFaceFauxConsistent(MeshType &m)
{
RequirePerFaceFlags(m);
RequireFFAdjacency(m);
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
{
if((*fi).IsD()) continue;
for(int e=0;e<(*fi).VN();++e)
{
FacePointer opp = fi->FFp(e);
int oppEdge = fi->FFi(e);
if(fi->IsF(e) != opp->IsF(oppEdge)) return false;
}
}
return true;
}
/**
 * Is the mesh only composed by triangles? (no polygonal faces)
 * True iff no live face carries any faux-edge bit.
 */
static bool IsBitTriOnly(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (fi->IsD()) continue;
if (fi->IsAnyF()) return false;
}
return true;
}
/// True if the mesh contains at least one polygonal (faux-edged) face,
/// i.e. the negation of IsBitTriOnly.
static bool IsBitPolygonal(const MeshType &m){
return !IsBitTriOnly(m);
}
/**
 * Is the mesh only composed by quadrilaterals and triangles? (no pentas, etc)
 * It assumes that the bits are consistent. In that case there can be only a single faux edge
 * per face, so each live face must have either zero faux bits (triangle) or
 * exactly one (half of a quad).
 */
static bool IsBitTriQuadOnly(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
// valid values: exactly one faux bit (quad half) or none (triangle)
if ( tmp!=F::FAUX0 && tmp!=F::FAUX1 && tmp!=F::FAUX2 && tmp!=0 ) return false;
}
return true;
}
/**
 * How many quadrilaterals?
 * It assumes that the bits are consistent. In that case each quad is a pair
 * of triangles each carrying exactly one faux bit, so we count the tris with
 * a single faux edge and divide by two.
 */
static int CountBitQuads(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
int count=0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
// exactly one faux bit set means this triangle is half of a quad
if ( tmp==F::FAUX0 || tmp==F::FAUX1 || tmp==F::FAUX2) count++;
}
return count / 2;
}
/**
 * How many triangles? (non polygonal faces)
 * Counts the live faces carrying no faux-edge bit at all.
 */
static int CountBitTris(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
int triCount=0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (fi->IsD()) continue;
if (!(fi->IsAnyF())) ++triCount;
}
return triCount;
}
/**
 * How many polygons of any kind? (including triangles)
 * Each faux edge merges two triangles and is counted once from each side,
 * so the polygon count is fn minus half the faux half-edge count.
 * It assumes there are no faux vertices (vertices completely surrounded by
 * faux edges).
 */
static int CountBitPolygons(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
int fauxHalfEdges = 0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (fi->IsD()) continue;
for (int e = 0; e < 3; ++e)
if (fi->IsF(e)) ++fauxHalfEdges;
}
return m.fn - fauxHalfEdges/2;
}
/**
 * The number of polygonal faces is
 * FN - EN_f (each faux edge hides exactly one triangular face or in other words a polygon of n edges has n-3 faux edges.)
 * In the general case the number of polygonal faces is
 * FN - EN_f + VN_f
 * where:
 * EN_f is the number of faux edges.
 * VN_f is the number of faux vertices (e.g vertices completely surrounded by faux edges)
 * as an intuitive proof think of an internal vertex that is collapsed onto a border of a polygon:
 * it deletes 2 faces, 1 faux edge and 1 vertex so to keep the balance you have to add back the removed vertex.
 */
static int CountBitLargePolygons(MeshType &m)
{
tri::RequirePerFaceFlags(m);
// start with every vertex marked; referenced vertices are un-marked below,
// so at the end IsV() identifies unreferenced vertices, while referenced
// ones are re-marked only if touched by a non-faux edge
UpdateFlags<MeshType>::VertexSetV(m);
// First loop Clear all referenced vertices
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD())
for(int i=0;i<3;++i) fi->V(i)->ClearV();
// Second Loop, count (twice) faux edges and mark all vertices touched by non faux edges
// (e.g vertexes on the boundary of a polygon)
int countE = 0;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD()) {
for(int i=0;i<3;++i)
{
if (fi->IsF(i))
countE++;
else
{
fi->V0(i)->SetV();
fi->V1(i)->SetV();
}
}
}
// Third Loop, count the number of referenced vertexes that are completely surrounded by faux edges.
int countV = 0;
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
if (!vi->IsD() && !vi->IsV()) countV++;
// each faux edge was seen from both sides, hence countE/2
return m.fn - countE/2 + countV ;
}
/**
 * Checks that the mesh has consistent per-face faux edges
 * (the ones that merge triangles into larger polygons).
 * A border edge should never be faux, and faux edges should always be
 * reciprocated by another faux edge on the adjacent face.
 * It requires FF adjacency.
 * @return true if no inconsistency is found
 */
static bool HasConsistentPerFaceFauxFlag(const MeshType &m)
{
RequireFFAdjacency(m);
RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if(!(*fi).IsD())
for (int k=0; k<3; k++)
// fail on a non-mirrored faux flag, or a faux flag on a border edge
if( ( fi->IsF(k) != fi->cFFp(k)->IsF(fi->cFFi(k)) ) ||
( fi->IsF(k) && face::IsBorder(*fi,k)) )
{
return false;
}
return true;
}
/**
 * Count the number of non manifold vertices in a pure edge mesh (polyline),
 * i.e. the vertices where more than 2 edges meet.
 * Optionally selects the offending vertices.
 * Requires EE adjacency; rebuilds EdgeEdge topology.
 */
static int CountNonManifoldEdgeEE( MeshType & m, bool SelectFlag=false)
{
assert(m.fn == 0 && m.en >0); // just to be sure we are using an edge mesh...
RequireEEAdjacency(m);
tri::UpdateTopology<MeshType>::EdgeEdge(m);
if(SelectFlag) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
// per-vertex incident-edge counter
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many edges are incident on a vertex and store it in the TemporaryData Counter.
EdgeIterator ei;
for (ei = m.edge.begin(); ei != m.edge.end(); ++ei) if (!ei->IsD())
{
TD[(*ei).V(0)]++;
TD[(*ei).V(1)]++;
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop, check that each vertex has been seen 1 or 2 times.
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if (!vi->IsD())
{
if( TD[vi] >2 )
{
if(SelectFlag) (*vi).SetS();
nonManifoldCnt++;
}
}
return nonManifoldCnt;
}
/**
 * Count the number of non manifold edges in a mesh, e.g. the edges where there are more than 2 incident faces.
 *
 * Note that this test is not enough to say that a mesh is two manifold,
 * you have to count also the non manifold vertexes.
 * Optionally selects the vertices and faces touching non-manifold edges.
 * Requires FF adjacency.
 */
static int CountNonManifoldEdgeFF( MeshType & m, bool SelectFlag=false)
{
RequireFFAdjacency(m);
// three temporary per-face user bits, one per edge slot, marking edges
// already accounted for (so a shared non-manifold edge is counted once)
int nmfBit[3];
nmfBit[0]= FaceType::NewBitFlag();
nmfBit[1]= FaceType::NewBitFlag();
nmfBit[2]= FaceType::NewBitFlag();
UpdateFlags<MeshType>::FaceClear(m,nmfBit[0]+nmfBit[1]+nmfBit[2]);
if(SelectFlag){
UpdateSelection<MeshType>::VertexClear(m);
UpdateSelection<MeshType>::FaceClear(m);
}
int edgeCnt = 0;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD())
{
for(int i=0;i<3;++i)
if(!IsManifold(*fi,i))
{
// only count the edge if no earlier face already claimed it
if(!(*fi).IsUserBit(nmfBit[i]))
{
++edgeCnt;
if(SelectFlag)
{
(*fi).V0(i)->SetS();
(*fi).V1(i)->SetS();
}
// follow the ring of faces incident on edge i and mark the
// matching edge slot of each so it is not counted again
face::Pos<FaceType> nmf(&*fi,i);
do
{
if(SelectFlag) nmf.F()->SetS();
nmf.F()->SetUserBit(nmfBit[nmf.E()]);
nmf.NextF();
}
while(nmf.f != &*fi);
}
}
}
}
// NOTE(review): the three NewBitFlag() bits are never released with
// DeleteBitFlag — looks like a user-bit leak; confirm against upstream.
return edgeCnt;
}
/** Count (and eventually select) non 2-Manifold vertexes of a mesh
* e.g. the vertices with a non 2-manif. neighbourhood but that do not belong to non 2-manif edges.
* Typical situation: two cones connected by one vertex.
* The test compares, for each vertex, the number of faces reachable by
* walking FF adjacency around it with the total number of incident faces;
* a mismatch means the star is split into more than one fan.
* Requires FF adjacency.
*/
static int CountNonManifoldVertexFF( MeshType & m, bool selectVert = true )
{
RequireFFAdjacency(m);
if(selectVert) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
// per-vertex incident-face counter
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
FaceIterator fi;
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
TD[(*fi).V(0)]++;
TD[(*fi).V(1)]++;
TD[(*fi).V(2)]++;
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop.
// mark out of the game the vertexes that are incident on non manifold edges.
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;++i)
if (!IsManifold(*fi,i)) {
(*fi).V0(i)->SetV();
(*fi).V1(i)->SetV();
}
}
// Third Loop, for safe vertexes, check that the number of faces that you can reach starting
// from it and using FF is the same of the previously counted.
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;i++) if(!(*fi).V(i)->IsV()){
(*fi).V(i)->SetV();
face::Pos<FaceType> pos(&(*fi),i);
int starSizeFF = pos.NumberOfIncidentFaces();
if (starSizeFF != TD[(*fi).V(i)])
{
if(selectVert) (*fi).V(i)->SetS();
nonManifoldCnt++;
}
}
}
return nonManifoldCnt;
}
/// Very simple test of water tightness: no boundary edges and no non
/// manifold edges. Assumes the mesh is orientable (it could be debated
/// whether a closed non-orientable surface is watertight or not).
///
/// The rationale of not testing orientability here is that orientability
/// requires FF adjacency while this test needs no adjacency at all.
static bool IsWaterTight(MeshType & m)
{
int totalEdges=0;
int borderEdges=0;
int nonManifEdges=0;
CountEdgeNum(m, totalEdges, borderEdges, nonManifEdges);
return borderEdges==0 && nonManifEdges==0;
}
/// Count the edges of the mesh, classifying them as boundary (exactly one
/// incident face) or non-manifold (more than two incident faces).
/// Works by sorting all face half-edges by endpoint and counting runs of
/// equal edges; needs no precomputed adjacency.
/// @param total_e    out: total number of distinct edges
/// @param boundary_e out: edges with exactly one incident face
/// @param non_manif_e out: edges with more than two incident faces
static void CountEdgeNum( MeshType & m, int &total_e, int &boundary_e, int &non_manif_e )
{
std::vector< typename tri::UpdateTopology<MeshType>::PEdge > edgeVec;
tri::UpdateTopology<MeshType>::FillEdgeVector(m,edgeVec,true);
sort(edgeVec.begin(), edgeVec.end()); // sort by endpoints so equal edges are adjacent
total_e=0;
boundary_e=0;
non_manif_e=0;
// number of faces sharing the current run of equal edges
size_t f_on_cur_edge =1;
for(size_t i=0;i<edgeVec.size();++i)
{
if(( (i+1) == edgeVec.size()) || !(edgeVec[i] == edgeVec[i+1]))
{
// end of a run: classify the edge by its multiplicity
++total_e;
if(f_on_cur_edge==1)
++boundary_e;
if(f_on_cur_edge>2)
++non_manif_e;
f_on_cur_edge=1;
}
else
{
++f_on_cur_edge;
}
} // end for
}
/// Count the holes (boundary loops) of the mesh by walking each unvisited
/// boundary with a face Pos. If during the walk an already-selected vertex
/// is met again, the loop is split there into an additional hole
/// (handles boundaries touching at a vertex).
/// Assumes triangular faces; uses the vertex S flag as the visited mark
/// (clears it first).
/// @return the number of detected holes
static int CountHoles( MeshType & m)
{
int numholev=0;
FaceIterator fi;
FaceIterator gi;
vcg::face::Pos<FaceType> he;
vcg::face::Pos<FaceType> hei;
std::vector< std::vector<CoordType> > holes; //indices of vertices
vcg::tri::UpdateFlags<MeshType>::VertexClearS(m);
gi=m.face.begin(); fi=gi;
for(fi=m.face.begin();fi!=m.face.end();fi++)//for all faces do
{
for(int j=0;j<3;j++)//for all edges
{
// a selected vertex means this boundary was already traversed
if(fi->V(j)->IsS()) continue;
if(face::IsBorder(*fi,j))//found an unvisited border edge
{
he.Set(&(*fi),j,fi->V(j)); //set the face-face iterator to the current face, edge and vertex
std::vector<CoordType> hole; //start of a new hole
hole.push_back(fi->P(j)); // including the first vertex
numholev++;
he.v->SetS(); //set the current vertex as selected
he.NextB(); //go to the next boundary edge
while(fi->V(j) != he.v)//walk until we come back to the starting vertex
{
CoordType newpoint = he.v->P(); //select its vertex.
if(he.v->IsS())//check if this vertex was selected already, because then we have an additional hole.
{
//cut and paste the additional hole.
std::vector<CoordType> hole2;
int index = static_cast<int>(find(hole.begin(),hole.end(),newpoint)
- hole.begin());
// everything from the repeated vertex onward forms the inner hole
for(unsigned int i=index; i<hole.size(); i++)
hole2.push_back(hole[i]);
hole.resize(index);
if(hole2.size()!=0) //annoying in degenerate cases
holes.push_back(hole2);
}
hole.push_back(newpoint);
numholev++;
he.v->SetS(); //set the current vertex as selected
he.NextB(); //go to the next boundary edge
}
holes.push_back(hole);
}
}
}
return static_cast<int>(holes.size());
}
/*
Compute the set of connected components of a given mesh
it fills a vector of pair < int , faceptr > with, for each connecteed component its size and a represnant
*/
static int CountConnectedComponents(MeshType &m)
{
  // Delegate to ConnectedComponents(), discarding the per-component
  // (size, representative face) pairs.
  std::vector< std::pair<int,FacePointer> > componentInfo;
  return ConnectedComponents(m, componentInfo);
}
/// Compute the set of connected components of the mesh.
/// Fills CCV with one (size, representative face) pair per component and
/// returns the number of components. Deleted faces are ignored.
/// Requires face-face adjacency; the face selection bit is used as a
/// 'visited' marker and is cleared on entry.
static int ConnectedComponents(MeshType &m, std::vector< std::pair<int,FacePointer> > &CCV)
{
  tri::RequireFFAdjacency(m);
  CCV.clear();
  tri::UpdateSelection<MeshType>::FaceClear(m);
  std::stack<FacePointer> sf;
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()) && !(*fi).IsS())
    {
      // A new component starts here: flood fill it through FF adjacency.
      (*fi).SetS();
      CCV.push_back(std::make_pair(0,&*fi));
      sf.push(&*fi);
      while (!sf.empty())
      {
        // BUGFIX: declared and assigned here; the old file-scope
        // `FacePointer fpt=&*(m.face.begin());` dereferenced begin()
        // even on an empty face container (undefined behavior).
        FacePointer fpt = sf.top();
        ++CCV.back().first; // count this face in the current component
        sf.pop();
        for(int j=0;j<3;++j)
        {
          if( !face::IsBorder(*fpt,j) )
          {
            FacePointer l = fpt->FFp(j);
            if( !(*l).IsS() )
            {
              (*l).SetS();
              sf.push(l);
            }
          }
        }
      }
    }
  }
  return int(CCV.size());
}
/// Compute, for every vertex, the number of incident faces (valence),
/// storing the result in the per-vertex integer attribute handle \p h.
static void ComputeValence( MeshType &m, typename MeshType::PerVertexIntHandle &h)
{
  // Reset all the counters first.
  for (VertexIterator vIt = m.vert.begin(); vIt != m.vert.end(); ++vIt)
    h[vIt] = 0;
  // Each non-deleted face contributes one unit to each of its vertices.
  for (FaceIterator fIt = m.face.begin(); fIt != m.face.end(); ++fIt)
  {
    if ((*fIt).IsD())
      continue;
    for (int k = 0; k < fIt->VN(); ++k)
      ++h[tri::Index(m, fIt->V(k))];
  }
}
/**
GENUS.
A topologically invariant property of a surface defined as
the largest number of non-intersecting simple closed curves that can be
drawn on the surface without separating it.
Roughly speaking, it is the number of holes in a surface.
The genus g of a closed surface, also called the geometric genus, is related to the
Euler characteristic by the relation $chi$ by $chi==2-2g$.
The genus of a connected, orientable surface is an integer representing the maximum
number of cuttings along closed simple curves without rendering the resultant
manifold disconnected. It is equal to the number of handles on it.
For general polyhedra the <em>Euler Formula</em> is:
V - E + F = 2 - 2G - B
where V is the number of vertices, F is the number of faces, E is the
number of edges, G is the genus and B is the number of <em>boundary polygons</em>.
The above formula is valid for a mesh with one single connected component.
By considering multiple connected components the formula becomes:
V - E + F = 2C - 2Gs - B -> 2Gs = - ( V-E+F +B -2C)
where C is the number of connected components and Gs is the sum of
the genus of all connected components.
Note that in the case of a mesh with boundaries the meaning of Genus is less intuitive than it could seem.
A closed sphere, a sphere with one hole (e.g. a disk) and a sphere with two holes (e.g. a tube) all of them have Genus == 0
*/
static int MeshGenus(int nvert,int nedges,int nfaces, int numholes, int numcomponents)
{
return -((nvert + nfaces - nedges + numholes - 2 * numcomponents) / 2);
}
// Compute the genus of the mesh, gathering all the needed topological
// counts (edges, holes, connected components) from the mesh itself.
static int MeshGenus(MeshType &m)
{
  int edgeTotal, edgeBoundary, edgeNonManifold;
  CountEdgeNum(m, edgeTotal, edgeBoundary, edgeNonManifold);
  const int holeNum = CountHoles(m);
  const int compNum = CountConnectedComponents(m);
  return MeshGenus(m.vn, edgeTotal, m.fn, holeNum, compNum);
}
/**
* Check if the given mesh is regular, semi-regular or irregular.
*
* Each vertex of a \em regular mesh has valence 6 except for border vertices
* which have valence 4.
*
* A \em semi-regular mesh is derived from an irregular one applying
* 1-to-4 subdivision recursively. (not checked for now)
*
* All other meshes are \em irregular.
*/
static void IsRegularMesh(MeshType &m, bool &Regular, bool &Semiregular)
{
  RequireVFAdjacency(m);
  Regular = true;
  // For each vertex count the incident edges by circulating around it.
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
  {
    if (!vi->IsD())
    {
      face::Pos<FaceType> he((*vi).VFp(), &*vi);
      face::Pos<FaceType> ht = he;
      int n = 0;
      bool border = false;
      do
      {
        ++n;
        ht.NextE();
        if (ht.IsBorder())
          border = true;
      }
      while (ht != he);
      // On the border the circulation visits each edge twice.
      if (border)
        n = n / 2;
      // BUGFIX: the previous test `(n != 6)&&(!border && n != 4)` could
      // never flag a border vertex as irregular. Per the documented
      // contract, an internal vertex must have valence 6 and a border
      // vertex valence 4.
      if ((!border && n != 6) || (border && n != 4))
      {
        Regular = false;
        break;
      }
    }
  }
  if (!Regular)
    Semiregular = false;
  else
  {
    // For now we do not account for semi-regularity
    Semiregular = false;
  }
}
/// Return true when every face-face adjacency is coherently oriented,
/// i.e. face::CheckOrientation succeeds on all three edges of every
/// non-deleted face.
static bool IsCoherentlyOrientedMesh(MeshType &m)
{
  for (FaceIterator faceIt = m.face.begin(); faceIt != m.face.end(); ++faceIt)
  {
    if (faceIt->IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!face::CheckOrientation(*faceIt, e))
        return false;
  }
  return true;
}
/** Try to orient all the faces of the mesh coherently, flipping faces as needed.
 *  \param Oriented   set to true iff the mesh was ALREADY coherently oriented
 *                    (no face had to be flipped).
 *  \param Orientable set to true iff a coherent orientation could be reached.
 *  Faces are flood-filled through (manifold) FF adjacency; reaching an
 *  already-visited face with the wrong orientation proves non-orientability.
 *  NOTE(review): when Orientable becomes false only the inner edge loop is
 *  broken, so the stack of the current component is still drained before the
 *  outer `if (!Orientable) break;` takes effect — confirm this is intended.
 */
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
RequireFFAdjacency(m);
assert(&Oriented != &Orientable);
assert(m.face.back().FFp(0)); // This algorithms require FF topology initialized
Orientable = true;
Oriented = true;
tri::UpdateSelection<MeshType>::FaceClear(m); // selection bit = 'visited (and oriented)'
std::stack<FacePointer> faces;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD() && !fi->IsS())
{
// each face put in the stack is selected (and oriented)
fi->SetS();
faces.push(&(*fi));
// empty the stack
while (!faces.empty())
{
FacePointer fp = faces.top();
faces.pop();
// make consistently oriented the adjacent faces
for (int j = 0; j < 3; j++)
{
// get one of the adjacent face
FacePointer fpaux = fp->FFp(j);
int iaux = fp->FFi(j);
if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
{
if (!CheckOrientation(*fpaux, iaux))
{
Oriented = false;
if (!fpaux->IsS())
{
// not visited yet: flip it so it agrees with fp
face::SwapEdge<FaceType,true>(*fpaux, iaux);
assert(CheckOrientation(*fpaux, iaux));
}
else
{
// already fixed with the opposite orientation: not orientable
Orientable = false;
break;
}
}
// put the oriented face into the stack
if (!fpaux->IsS())
{
fpaux->SetS();
faces.push(fpaux);
}
}
}
}
}
if (!Orientable) break;
}
}
/// Flip the orientation of the whole mesh flipping all the faces (by swapping the first two vertices)
static void FlipMesh(MeshType &m, bool selected=false)
{
  // Swapping the first two vertices of a face inverts its orientation;
  // the matching wedge texcoords must follow the vertices.
  const bool hasWedgeTex = HasPerWedgeTexCoord(m);
  for (FaceIterator it = m.face.begin(); it != m.face.end(); ++it)
  {
    if ((*it).IsD())
      continue;
    if (selected && !(*it).IsS())
      continue; // flip only the selected faces when requested
    face::SwapEdge<FaceType,false>((*it), 0);
    if (hasWedgeTex)
      std::swap((*it).WT(0), (*it).WT(1));
  }
}
/// Flip a mesh so that its normals are oriented outside.
/// Just for safety it uses a voting scheme.
/// It assumes that the
/// mesh already has coherent normals and is
/// watertight and single component.
static bool FlipNormalOutside(MeshType &m)
{
if(m.vert.empty()) return false; // nothing to vote on
// Up-to-date per-vertex normals are needed for the voting below.
tri::UpdateNormal<MeshType>::PerVertexAngleWeighted(m);
tri::UpdateNormal<MeshType>::NormalizePerVertex(m);
// For each probe direction keep the extremal (lowest/highest) vertex.
std::vector< VertexPointer > minVertVec;
std::vector< VertexPointer > maxVertVec;
// The set of directions to be choosen
std::vector< CoordType > dirVec;
dirVec.push_back(CoordType(1,0,0));
dirVec.push_back(CoordType(0,1,0));
dirVec.push_back(CoordType(0,0,1));
dirVec.push_back(CoordType( 1, 1,1));
dirVec.push_back(CoordType(-1, 1,1));
dirVec.push_back(CoordType(-1,-1,1));
dirVec.push_back(CoordType( 1,-1,1));
for(size_t i=0;i<dirVec.size();++i)
{
Normalize(dirVec[i]);
minVertVec.push_back(&*m.vert.begin());
maxVertVec.push_back(&*m.vert.begin());
}
// Find, for every direction, the vertices with extremal projection.
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if(!(*vi).IsD())
{
for(size_t i=0;i<dirVec.size();++i)
{
if( (*vi).cP().dot(dirVec[i]) < minVertVec[i]->P().dot(dirVec[i])) minVertVec[i] = &*vi;
if( (*vi).cP().dot(dirVec[i]) > maxVertVec[i]->P().dot(dirVec[i])) maxVertVec[i] = &*vi;
}
}
// On an outward-oriented watertight mesh, the minimum vertex normal should
// point roughly opposite to the probe direction and the maximum vertex
// normal along it; each disagreement is a vote for 'normals point inside'.
int voteCount=0;
ScalarType angleThreshold = cos(math::ToRad(85.0));
for(size_t i=0;i<dirVec.size();++i)
{
// qDebug("Min vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],minVertVec[i]->P()[0],minVertVec[i]->P()[1],minVertVec[i]->P()[2]);
// qDebug("Max vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],maxVertVec[i]->P()[0],maxVertVec[i]->P()[1],maxVertVec[i]->P()[2]);
if(minVertVec[i]->N().dot(dirVec[i]) > angleThreshold ) voteCount++;
if(maxVertVec[i]->N().dot(dirVec[i]) < -angleThreshold ) voteCount++;
}
// qDebug("votecount = %i",voteCount);
// Flip only when at least half of the votes say the mesh points inside.
if(voteCount < int(dirVec.size())/2) return false;
FlipMesh(m);
return true;
}
// Search and remove small single triangle folds
// - a face has normal opposite to all other faces
// - choose the edge that brings to the face f1 containing the vertex opposite to that edge.
// Returns the total number of edge flips performed; iterates until
// convergence when `repeat` is true.
static int RemoveFaceFoldByFlip(MeshType &m, float normalThresholdDeg=175, bool repeat=true)
{
RequireFFAdjacency(m);
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UpdateTopology<MeshType>::FaceFace(m); // flips change FF topology: rebuild each pass
tri::UnMarkAll(m);
count = 0;
ScalarType NormalThrRad = math::ToRad(normalThresholdDeg);
ScalarType eps = 0.0001; // this epsilon value is in absolute value. It is a distance from edge in baricentric coords.
//detection stage: a folded face has a normal (almost) opposite to all three neighbors
for(FaceIterator fi=m.face.begin();fi!= m.face.end();++fi ) if(!(*fi).IsV())
{ Point3<ScalarType> NN = vcg::TriangleNormal((*fi)).Normalize();
if( vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(0)).Normalize()) > NormalThrRad &&
vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(1)).Normalize()) > NormalThrRad &&
vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(2)).Normalize()) > NormalThrRad )
{
(*fi).SetS();
//(*fi).C()=Color4b(Color4b::Red);
// now search the best edge to flip: the one whose opposite vertex
// projects strictly inside the adjacent face (in barycentric coords)
for(int i=0;i<3;i++)
{
Point3<ScalarType> &p=(*fi).P2(i);
Point3<ScalarType> L;
bool ret = vcg::InterpolationParameters((*(*fi).FFp(i)),TriangleNormal(*(*fi).FFp(i)),p,L);
if(ret && L[0]>eps && L[1]>eps && L[2]>eps)
{
(*fi).FFp(i)->SetS();
(*fi).FFp(i)->SetV(); // do not re-examine the neighbor in this pass
//(*fi).FFp(i)->C()=Color4b(Color4b::Green);
if(face::CheckFlipEdge<FaceType>( *fi, i )) {
face::FlipEdge<FaceType>( *fi, i );
++count; ++total;
}
}
}
}
}
// tri::UpdateNormal<MeshType>::PerFace(m);
}
while( repeat && count );
return total;
}
/** Remove T-vertex configurations by edge flip.
 *  A T-vertex is detected when the vertex opposite to the longest side
 *  of a face lies almost on that side:
 *  dist(opposite vertex, side) * threshold <= side length.
 *  The flip is applied only when it improves the worst quality of the
 *  two triangles involved.
 *  \return the total number of flips performed.
 */
static int RemoveTVertexByFlip(MeshType &m, float threshold=40, bool repeat=true)
{
RequireFFAdjacency(m);
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UpdateTopology<MeshType>::FaceFace(m); // flips change FF topology: rebuild each pass
tri::UnMarkAll(m);
count = 0;
//detection stage
for(unsigned int index = 0 ; index < m.face.size(); ++index )
{
FacePointer f = &(m.face[index]); float sides[3]; CoordType dummy;
sides[0] = Distance(f->P(0), f->P(1));
sides[1] = Distance(f->P(1), f->P(2));
sides[2] = Distance(f->P(2), f->P(0));
// Find largest triangle side
int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
if( tri::IsMarked(m,f->V2(i) )) continue; // vertex already handled in this pass
// T-vertex test: the opposite vertex is (nearly) collinear with side i.
if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
{
tri::Mark(m,f->V2(i));
if(face::CheckFlipEdge<FaceType>( *f, i )) {
// Check if EdgeFlipping improves quality
FacePointer g = f->FFp(i); int k = f->FFi(i);
Triangle3<ScalarType> t1(f->P(i), f->P1(i), f->P2(i)), t2(g->P(k), g->P1(k), g->P2(k)),
t3(f->P(i), g->P2(k), f->P2(i)), t4(g->P(k), f->P2(i), g->P2(k));
if ( std::min( QualityFace(t1), QualityFace(t2) ) < std::min( QualityFace(t3), QualityFace(t4) ))
{
face::FlipEdge<FaceType>( *f, i );
++count; ++total;
}
}
}
}
// tri::UpdateNormal<MeshType>::PerFace(m);
}
while( repeat && count );
return total;
}
/** Remove T-vertex configurations by collapsing the T-vertex onto the
 *  nearest endpoint of the side it lies on.
 *  Duplicated vertices are removed and containers compacted each pass.
 *  \return the total number of collapses performed.
 */
static int RemoveTVertexByCollapse(MeshType &m, float threshold=40, bool repeat=true)
{
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UnMarkAll(m);
count = 0;
//detection stage
for(unsigned int index = 0 ; index < m.face.size(); ++index )
{
FacePointer f = &(m.face[index]);
float sides[3];
CoordType dummy;
sides[0] = Distance(f->P(0), f->P(1));
sides[1] = Distance(f->P(1), f->P(2));
sides[2] = Distance(f->P(2), f->P(0));
// Index of the longest side of the triangle.
int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
if( tri::IsMarked(m,f->V2(i) )) continue; // vertex already handled in this pass
// T-vertex test: the opposite vertex lies almost on the longest side.
if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
{
tri::Mark(m,f->V2(i));
// Snap the T-vertex onto the closest endpoint of the side.
int j = Distance(dummy,f->P(i))<Distance(dummy,f->P1(i))?i:(i+1)%3;
f->P2(i) = f->P(j); tri::Mark(m,f->V(j));
++count; ++total;
}
}
// The snapped vertices are now coincident: merge them and compact.
tri::Clean<MeshType>::RemoveDuplicateVertex(m);
tri::Allocator<MeshType>::CompactFaceVector(m);
tri::Allocator<MeshType>::CompactVertexVector(m);
}
while( repeat && count );
return total;
}
/** Collect all the faces involved in self-intersections.
 *  A uniform grid limits the candidate pairs to faces with overlapping
 *  bounding boxes; TestFaceFaceIntersection does the exact test.
 *  \param ret filled with the intersecting faces (the probe face is added once).
 *  \return true iff at least one self-intersection was found.
 */
static bool SelfIntersections(MeshType &m, std::vector<FaceType*> &ret)
{
RequirePerFaceMark(m);
ret.clear();
// Faces already used as probe get this user bit so each pair is tested once.
int referredBit = FaceType::NewBitFlag();
tri::UpdateFlags<MeshType>::FaceClear(m,referredBit);
TriMeshGrid gM;
gM.Set(m.face.begin(),m.face.end());
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
{
(*fi).SetUserBit(referredBit);
Box3< ScalarType> bbox;
(*fi).GetBBox(bbox);
std::vector<FaceType*> inBox;
vcg::tri::GetInBoxFace(m, gM, bbox,inBox);
bool Intersected=false;
typename std::vector<FaceType*>::iterator fib;
for(fib=inBox.begin();fib!=inBox.end();++fib)
{
if(!(*fib)->IsUserBit(referredBit) && (*fib != &*fi) )
if(Clean<MeshType>::TestFaceFaceIntersection(&*fi,*fib)){
ret.push_back(*fib);
if(!Intersected) {
ret.push_back(&*fi); // add the probe face only once
Intersected=true;
}
}
}
inBox.clear();
}
FaceType::DeleteBitFlag(referredBit);
return (ret.size()>0);
}
/**
This function simply test that the vn and fn counters be consistent with the size of the containers and the number of deleted simplexes.
*/
/// Check that the vn/en/fn counters agree with the container sizes once
/// the deleted elements are accounted for.
static bool IsSizeConsistent(MeshType &m)
{
  int delVert = 0, delEdge = 0, delFace = 0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if ((*vi).IsD()) ++delVert;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
    if ((*ei).IsD()) ++delEdge;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if ((*fi).IsD()) ++delFace;
  return size_t(m.vn + delVert) == m.vert.size()
      && size_t(m.en + delEdge) == m.edge.size()
      && size_t(m.fn + delFace) == m.face.size();
}
/**
This function simply test that all the faces have a consistent face-face topology relation.
useful for checking that a topology modifying algorithm does not mess something.
*/
static bool IsFFAdjacencyConsistent(MeshType &m)
{
  // Verify FFCorrectness on every edge of every live face.
  RequireFFAdjacency(m);
  for (FaceIterator it = m.face.begin(); it != m.face.end(); ++it)
  {
    if ((*it).IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!FFCorrectness(*it, e))
        return false;
  }
  return true;
}
/**
This function simply test that a mesh has some reasonable tex coord.
*/
static bool HasConsistentPerWedgeTexCoord(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);
  for (FaceIterator it = m.face.begin(); it != m.face.end(); ++it)
  {
    if ((*it).IsD())
      continue;
    FaceType &f = (*it);
    // The three wedges of a face must refer to the same texture index.
    const bool sameIndex = (f.WT(0).N() == f.WT(1).N()) && (f.WT(0).N() == f.WT(2).N());
    if (!sameIndex)
      return false;
    // A negative index means an undefined texture: not allowed.
    if (f.WT(0).N() < 0)
      return false;
  }
  return true;
}
/**
Simple check that there are no face with all collapsed tex coords.
*/
static bool HasZeroTexCoordFace(MeshType &m)
{
// NOTE(review): despite the name, this returns false as soon as a face
// with all three wedge texcoords collapsed is FOUND, and true when none
// exists (i.e. it answers "has NO zero-texcoord face"). Kept as-is since
// callers may rely on this meaning — confirm before renaming/inverting.
tri::RequirePerFaceWedgeTexCoord(m);
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if(!(*fi).IsD())
{
// All three wedge texcoords coincide -> degenerate in uv space.
if( (*fi).WT(0).P() == (*fi).WT(1).P() && (*fi).WT(0).P() == (*fi).WT(2).P() ) return false;
}
return true;
}
/**
This function test if two triangular faces of a mesh intersect.
It assumes that the faces (as storage) are different (e.g different address)
If the two faces are different but coincident (same set of vertexes) return true.
if the faces share an edge no test is done.
if the faces share only a vertex, the opposite edge is tested against the face
*/
static bool TestFaceFaceIntersection(FaceType *f0,FaceType *f1)
{
assert(f0!=f1);
int sv = face::CountSharedVertex(f0,f1);
if(sv==3) return true; // different faces over the same three vertices: coincident
if(sv==0) return (vcg::IntersectionTriangleTriangle<FaceType>((*f0),(*f1)));
// if the faces share only a vertex, the opposite edge (as a segment) is tested against the face
// to avoid degenerate cases where the two triangles have the opposite edge on a common plane
// we offset the segment to test toward the shared vertex
if(sv==1)
{
int i0,i1; ScalarType a,b;
face::FindSharedVertex(f0,f1,i0,i1);
CoordType shP = f0->V(i0)->P()*0.5;
// Segment joining the midpoints of the shared vertex and each opposite-edge endpoint.
if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f0).V1(i0)->P()*0.5+shP,(*f0).V2(i0)->P()*0.5+shP), *f1, a, b) )
{
// a,b are the param coords of the intersection point of the segment.
// NOTE(review): EPSIL is a tolerance constant defined elsewhere in this file.
if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
return true;
}
if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f1).V1(i1)->P()*0.5+shP,(*f1).V2(i1)->P()*0.5+shP), *f0, a, b) )
{
// a,b are the param coords of the intersection point of the segment.
if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
return true;
}
}
// Faces sharing a full edge (sv==2) are never reported as intersecting.
return false;
}
/**
This function merge all the vertices that are closer than the given radius
*/
static int MergeCloseVertex(MeshType &m, const ScalarType radius)
{
  // First snap the close vertices onto common positions, then drop the
  // resulting exact duplicates.
  const int mergedCnt = ClusterVertex(m, radius);
  RemoveDuplicateVertex(m, true);
  return mergedCnt;
}
/** Cluster vertices closer than \p radius.
 *  Each unvisited vertex acts as a seed: every not-yet-visited vertex
 *  within \p radius is snapped onto the seed position. The coincident
 *  duplicates are NOT removed here (see MergeCloseVertex).
 *  \return the number of vertices snapped onto a seed.
 */
static int ClusterVertex(MeshType &m, const ScalarType radius)
{
if(m.vn==0) return 0;
// some spatial indexing structure does not work well with deleted vertices...
tri::Allocator<MeshType>::CompactVertexVector(m);
typedef vcg::SpatialHashTable<VertexType, ScalarType> SampleSHT;
SampleSHT sht;
tri::EmptyTMark<MeshType> markerFunctor;
std::vector<VertexType*> closests;
int mergedCnt=0;
sht.Set(m.vert.begin(), m.vert.end());
UpdateFlags<MeshType>::VertexClearV(m); // the V flag marks already-clustered vertices
for(VertexIterator viv = m.vert.begin(); viv!= m.vert.end(); ++viv)
if(!(*viv).IsD() && !(*viv).IsV())
{
(*viv).SetV();
Point3<ScalarType> p = viv->cP();
// Query the hash grid with a radius-sized box around the seed.
Box3<ScalarType> bb(p-Point3<ScalarType>(radius,radius,radius),p+Point3<ScalarType>(radius,radius,radius));
GridGetInBox(sht, markerFunctor, bb, closests);
// qDebug("Vertex %i has %i closest", &*viv - &*m.vert.begin(),closests.size());
for(size_t i=0; i<closests.size(); ++i)
{
ScalarType dist = Distance(p,closests[i]->cP());
if(dist < radius && !closests[i]->IsV())
{
// printf("%f %f \n",dist,radius);
mergedCnt++;
closests[i]->SetV();
closests[i]->P()=p; // snap onto the seed position
}
}
}
return mergedCnt;
}
/// Delete every connected component with fewer than \p maxCCSize faces.
/// \return a pair (total number of components, number of deleted ones).
static std::pair<int,int> RemoveSmallConnectedComponentsSize(MeshType &m, int maxCCSize)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  const int totalCC = ConnectedComponents(m, CCV);
  int deletedCC = 0;
  ConnectedComponentIterator<MeshType> ci;
  for (unsigned int i = 0; i < CCV.size(); ++i)
  {
    if (CCV[i].first >= maxCCSize)
      continue; // component is big enough: keep it
    ++deletedCC;
    // Collect the faces first: deleting while iterating the component is unsafe.
    std::vector<typename MeshType::FacePointer> toDelete;
    for (ci.start(m, CCV[i].second); !ci.completed(); ++ci)
      toDelete.push_back(*ci);
    typename std::vector<typename MeshType::FacePointer>::iterator fp;
    for (fp = toDelete.begin(); fp != toDelete.end(); ++fp)
      Allocator<MeshType>::DeleteFace(m, (**fp));
  }
  return std::make_pair(totalCC, deletedCC);
}
/// Remove the connected components smaller than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveSmallConnectedComponentsDiameter(MeshType &m, ScalarType maxDiameter)
{
std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
int TotalCC=ConnectedComponents(m, CCV);
int DeletedCC=0;
tri::ConnectedComponentIterator<MeshType> ci;
for(unsigned int i=0;i<CCV.size();++i)
{
// Accumulate the component bounding box while collecting its faces.
Box3<ScalarType> bb;
std::vector<typename MeshType::FacePointer> FPV;
for(ci.start(m,CCV[i].second);!ci.completed();++ci)
{
FPV.push_back(*ci);
bb.Add((*ci)->P(0));
bb.Add((*ci)->P(1));
bb.Add((*ci)->P(2));
}
// Delete the whole component if its bbox diagonal is below the threshold.
if(bb.Diag()<maxDiameter)
{
DeletedCC++;
typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
}
}
return std::make_pair(TotalCC,DeletedCC);
}
/// Remove the connected components greater than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveHugeConnectedComponentsDiameter(MeshType &m, ScalarType minDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    // BUGFIX: use Box3<ScalarType> instead of the hard-coded Box3f, which
    // mixed float precision with the mesh scalar type; this also matches
    // RemoveSmallConnectedComponentsDiameter above.
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    // Delete the whole component if its bbox diagonal exceeds the threshold.
    if(bb.Diag()>minDiameter)
    {
      DeletedCC++;
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}
/**
Select the folded faces using an angle threshold on the face normal.
The face is selected if the dot product between the face normal and the normal of the plane fitted
using the vertices of the one ring faces is below the cosThreshold.
The cosThreshold requires a negative cosine value (a positive value is clamp to zero).
*/
static void SelectFoldedFaceFromOneRingFaces(MeshType &m, ScalarType cosThreshold)
{
  tri::RequireVFAdjacency(m);
  tri::RequirePerFaceNormal(m);
  tri::RequirePerVertexNormal(m);
  vcg::tri::UpdateSelection<MeshType>::FaceClear(m);
  vcg::tri::UpdateNormal<MeshType>::PerFaceNormalized(m);
  vcg::tri::UpdateNormal<MeshType>::PerVertexNormalized(m);
  vcg::tri::UpdateTopology<MeshType>::VertexFace(m);
  // The threshold must be a negative cosine; clamp positive values to zero.
  if (cosThreshold > 0)
    cosThreshold = 0;
  // Signed index for OpenMP; the cast fixes the signed/unsigned comparison.
#pragma omp parallel for schedule(dynamic, 10)
  for (int i = 0; i < (int)m.face.size(); i++)
  {
    typename MeshType::FacePointer f = &m.face[i];
    // BUGFIX: deleted faces were processed too; their VF adjacency is
    // stale after UpdateTopology::VertexFace, so skip them.
    if (f->IsD())
      continue;
    // Gather the one-ring vertices (plus the face's own) and their positions.
    std::vector<typename MeshType::VertexPointer> nearVertex;
    std::vector<typename MeshType::CoordType> point;
    for (int j = 0; j < 3; j++)
    {
      std::vector<typename MeshType::VertexPointer> temp;
      vcg::face::VVStarVF<typename MeshType::FaceType>(f->V(j), temp);
      typename std::vector<typename MeshType::VertexPointer>::iterator iter = temp.begin();
      for (; iter != temp.end(); iter++)
      {
        // The face's other two vertices are added once, below.
        if ((*iter) != f->V1(j) && (*iter) != f->V2(j))
        {
          nearVertex.push_back((*iter));
          point.push_back((*iter)->P());
        }
      }
      nearVertex.push_back(f->V(j));
      point.push_back(f->P(j));
    }
    if (point.size() > 3)
    {
      // Fit a plane to the one-ring point cloud.
      vcg::Plane3<typename MeshType::ScalarType> plane;
      vcg::FitPlaneToPointSet(point, plane);
      // Orient the fitted plane normal like the average vertex normal
      // (ScalarType accumulator instead of float, for double meshes).
      typename MeshType::ScalarType avgDot = 0;
      for (size_t j = 0; j < nearVertex.size(); j++)
        avgDot += plane.Direction().dot(nearVertex[j]->N());
      avgDot /= nearVertex.size();
      typename MeshType::VertexType::NormalType normal;
      if (avgDot < 0)
        normal = -plane.Direction();
      else
        normal = plane.Direction();
      // A face whose normal strongly disagrees with its neighborhood is folded.
      if (normal.dot(f->N()) < cosThreshold)
        f->SetS();
    }
  }
}
}; // end class
/*@}*/
} //End Namespace Tri
} // End Namespace vcg
#endif
|
counting.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_COUNTING_H
#define RIPPLES_COUNTING_H
#include <algorithm>
#include <iterator>
#include <omp.h>
#include "ripples/utility.h"
namespace ripples {
//! \brief Count the occurrencies of vertices in the RRR sets.
//!
//! \tparam InItr The input sequence iterator type.
//! \tparam OutItr The output sequence iterator type.
//!
//! \param in_begin The begin of the sequence of RRR sets.
//! \param in_end The end of the sequence of RRR sets.
//! \param out_begin The begin of the sequence storing the counters for each
//! vertex.
//! \param out_end The end of the sequence storing the counters for each vertex.
template <typename InItr, typename OutItr>
void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin,
OutItr out_end, sequential_tag &&) {
using rrr_set_type = typename std::iterator_traits<InItr>::value_type;
using vertex_type = typename rrr_set_type::value_type;
for (; in_begin != in_end; ++in_begin) {
std::for_each(in_begin->begin(), in_begin->end(),
[&](const vertex_type v) { *(out_begin + v) += 1; });
}
}
//! \brief Parallel counting of vertex occurrences across the RRR sets.
//!
//! Each thread owns a contiguous slice [low, high) of the vertex id space
//! and scans every RRR set counting only the vertices in its own slice, so
//! the counters need no synchronization.
//! NOTE(review): lower_bound/upper_bound require each RRR set to be sorted
//! in ascending order — confirm the producer guarantees this.
template <typename InItr, typename OutItr>
void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin,
OutItr out_end, size_t num_threads) {
using rrr_set_type = typename std::iterator_traits<InItr>::value_type;
using vertex_type = typename rrr_set_type::value_type;
#pragma omp parallel num_threads(num_threads)
{
size_t num_elements = std::distance(out_begin, out_end);
size_t threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads();
// Vertex-id range owned by this thread: [low, high).
vertex_type low = num_elements * threadnum / numthreads,
high = num_elements * (threadnum + 1) / numthreads;
for (auto itr = in_begin; itr != in_end; ++itr) {
// Restrict the scan to this thread's vertex slice.
auto begin = std::lower_bound(itr->begin(), itr->end(), low);
auto end = std::upper_bound(begin, itr->end(), high - 1);
std::for_each(begin, end,
[&](const vertex_type v) { *(out_begin + v) += 1; });
}
}
}
//! \brief Count the occurrencies of vertices in the RRR sets.
//!
//! \tparam InItr The input sequence iterator type.
//! \tparam OutItr The output sequence iterator type.
//!
//! \param in_begin The begin of the sequence of RRR sets.
//! \param in_end The end of the sequence of RRR sets.
//! \param out_begin The begin of the sequence storing the counters for each
//! vertex.
//! \param out_end The end of the sequence storing the counters for each vertex.
template <typename InItr, typename OutItr>
void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin,
OutItr out_end, omp_parallel_tag &&) {
size_t num_threads(1);
#pragma omp single
{ num_threads = omp_get_max_threads(); }
CountOccurrencies(in_begin, in_end, out_begin, out_end, num_threads);
}
//! \brief Update the coverage counters.
//!
//! \tparam RRRsetsItrTy The iterator type of the sequence of RRR sets.
//! \tparam VertexCoverageVectorTy The type of the vector storing counters.
//!
//! \param B The start sequence of RRRsets covered by the just selected seed.
//! \param E The start sequence of RRRsets covered by the just selected seed.
//! \param vertexCoverage The vector storing the counters to be updated.
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy>
void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E,
                    VertexCoverageVectorTy &vertexCoverage, sequential_tag &&) {
  // The RRR sets in [B, E) are now covered: decrement the counter of
  // every vertex appearing in them.
  while (B != E) {
    for (const auto &v : *B) {
      vertexCoverage[v] -= 1;
    }
    ++B;
  }
}
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy>
void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E,
VertexCoverageVectorTy &vertexCoverage,
size_t num_threads) {
for (; B != E; ++B) {
#pragma omp parallel for num_threads(num_threads)
for (size_t j = 0; j < (*B).size(); ++j) {
vertexCoverage[(*B)[j]] -= 1;
}
}
}
//! \brief Update the coverage counters.
//!
//! \tparam RRRsetsItrTy The iterator type of the sequence of RRR sets.
//! \tparam VertexCoverageVectorTy The type of the vector storing counters.
//!
//! \param B The start sequence of RRRsets covered by the just selected seed.
//! \param E The start sequence of RRRsets covered by the just selected seed.
//! \param vertexCoverage The vector storing the counters to be updated.
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy>
void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E,
VertexCoverageVectorTy &vertexCoverage,
omp_parallel_tag &&) {
size_t num_threads(1);
#pragma omp single
{ num_threads = omp_get_max_threads(); }
UpdateCounters(B, E, vertexCoverage, num_threads);
}
//! \brief Initialize the Heap storage.
//!
//! \tparam InItr The input sequence iterator type.
//! \tparam OutItr The output sequence iterator type.
//!
//! \param in_begin The begin of the sequence of vertex counters.
//! \param in_end The end of the sequence of vertex counters.
//! \param out_begin The begin of the sequence used as storage in the Heap.
//! \param out_end The end of the sequence used as storage in the Heap.
template <typename InItr, typename OutItr>
void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin,
                     OutItr out_end, sequential_tag &&) {
  // Pair every vertex id with its counter: out[v] = {v, count[v]}.
  using value_type = typename std::iterator_traits<OutItr>::value_type;
  using vertex_type = typename value_type::first_type;
  vertex_type v = 0;
  while (in_begin != in_end) {
    *out_begin = {v, *in_begin};
    ++in_begin;
    ++out_begin;
    ++v;
  }
}
//! \brief Parallel initialization of the heap storage:
//! out[v] = {v, count[v]} for every vertex id v.
//! NOTE(review): the loop compares vertex_type against the result of
//! std::distance (ptrdiff_t); confirm vertex_type is wide/signed enough
//! to avoid a signed/unsigned mismatch warning for large sequences.
template <typename InItr, typename OutItr>
void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin,
OutItr out_end, size_t num_threads) {
using value_type = typename std::iterator_traits<OutItr>::value_type;
using vertex_type = typename value_type::first_type;
#pragma omp parallel for num_threads(num_threads)
for (vertex_type v = 0; v < std::distance(in_begin, in_end); ++v) {
*(out_begin + v) = {v, *(in_begin + v)};
}
}
//! \brief Initialize the Heap storage.
//!
//! \tparam InItr The input sequence iterator type.
//! \tparam OutItr The output sequence iterator type.
//!
//! \param in_begin The begin of the sequence of vertex counters.
//! \param in_end The end of the sequence of vertex counters.
//! \param out_begin The begin of the sequence used as storage in the Heap.
//! \param out_end The end of the sequence used as storage in the Heap.
template <typename InItr, typename OutItr>
void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin,
OutItr out_end, omp_parallel_tag &&) {
size_t num_threads(1);
#pragma omp single
{ num_threads = omp_get_max_threads(); }
InitHeapStorage(in_begin, in_end, out_begin, out_end, num_threads);
}
} // namespace ripples
#endif /* RIPPLES_COUNTING_H */
|
GB_unop__bnot_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__bnot_uint8_uint8
// op(A') function: GB_unop_tran__bnot_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__bnot_uint8_uint8
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    const uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entrywise cij = ~(aij); uint8 -> uint8 requires no real typecast, so
    // the load, cast, and complement collapse into a single expression.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (uint8_t) (~(Ax [p])) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__bnot_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Phase 2 of the two-phase transpose: the shared template expands using
    // the GB_* macros defined above, transposing A and applying cij = ~(aij)
    // in a single pass.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bml_add_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Matrix addition.
 *
 * \f$ A = \alpha A + \beta B \f$
 *
 * A is updated in place.  Entries whose magnitude falls below the
 * threshold after the addition are dropped and each updated row is
 * compacted.  Only the rows owned by this rank (per A's domain
 * decomposition) are processed.
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param B Matrix B
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by B
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_add_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int N = A->N;
    int A_M = A->M;
    int B_M = B->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int *B_nnz = B->nnz;
    int *B_index = B->index;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    int myRank = bml_getMyRank();
    // Dense scratch per row: x accumulates values, ix marks occupied columns,
    // jx records the order columns were first touched.  Shared declaration +
    // firstprivate below; IBM XL cannot firstprivate VLAs, so that compiler
    // gets per-iteration declarations instead (see the loop body).
#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[N], jx[N];
    REAL_T x[N];
    memset(ix, 0, N * sizeof(int));
    memset(jx, 0, N * sizeof(int));
    // NOTE(review): 0.0 converts to byte value 0 here; memset fills bytes.
    memset(x, 0.0, N * sizeof(REAL_T));
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz)
#else
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    firstprivate(ix, jx, x)
#endif
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
#if defined(__IBMC__) || defined(__ibmxl__)
        int ix[N], jx[N];
        REAL_T x[N];
        memset(ix, 0, N * sizeof(int));
#endif
        // l counts the distinct columns touched in this row.
        int l = 0;
        // Skip the A pass entirely when alpha is exactly zero.
        if (alpha > (double) 0.0 || alpha < (double) 0.0)
            for (int jp = 0; jp < A_nnz[i]; jp++)
            {
                int k = A_index[ROWMAJOR(i, jp, N, A_M)];
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    ix[k] = i + 1;
                    jx[l] = k;
                    l++;
                }
                x[k] = x[k] + alpha * A_value[ROWMAJOR(i, jp, N, A_M)];
            }
        // Same accumulation for beta * B, merging into the same scratch row.
        if (beta > (double) 0.0 || beta < (double) 0.0)
            for (int jp = 0; jp < B_nnz[i]; jp++)
            {
                int k = B_index[ROWMAJOR(i, jp, N, B_M)];
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    ix[k] = i + 1;
                    jx[l] = k;
                    l++;
                }
                x[k] = x[k] + beta * B_value[ROWMAJOR(i, jp, N, B_M)];
            }
        A_nnz[i] = l;
        // Compact the row back into A, dropping sub-threshold entries and
        // resetting the scratch arrays for the next iteration.
        int ll = 0;
        for (int jp = 0; jp < l; jp++)
        {
            int jind = jx[jp];
            REAL_T xTmp = x[jind];
            if (is_above_threshold(xTmp, threshold))
            {
                A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp;
                A_index[ROWMAJOR(i, ll, N, A_M)] = jind;
                ll++;
            }
            x[jind] = 0.0;
            ix[jind] = 0;
        }
        A_nnz[i] = ll;
    }
}
/** Matrix addition with norm.
 *
 * \f$ A = \alpha A + \beta B \f$
 *
 * Same row-wise accumulate/compact scheme as bml_add_ellpack, but also
 * accumulates, per touched entry, y = A - B (independent of alpha/beta)
 * and returns the sum of y^2 over all touched entries.
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param B Matrix B
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by B
 * \param threshold Threshold for matrix addition
 * \return Sum of squared (A - B) entries over the touched sparsity pattern
 */
double TYPED_FUNC(
    bml_add_norm_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int N = A->N;
    int A_M = A->M;
    int B_M = B->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int *B_nnz = B->nnz;
    int *B_index = B->index;
    // NOTE(review): ind and ind2 are unused at this scope (the loops below
    // declare their own ind) — candidates for removal.
    int ind, ind2;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    double trnorm = 0.0;
    int myRank = bml_getMyRank();
    // Dense scratch per row: x accumulates alpha*A + beta*B, y accumulates
    // A - B (for the norm), ix marks occupied columns, jx records insertion
    // order.  IBM XL cannot firstprivate VLAs, hence the conditional layout.
#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[N], jx[N];
    REAL_T x[N];
    REAL_T y[N];
    memset(ix, 0, N * sizeof(int));
    memset(jx, 0, N * sizeof(int));
    memset(x, 0.0, N * sizeof(REAL_T));
    memset(y, 0.0, N * sizeof(REAL_T));
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    reduction(+:trnorm)
#else
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    firstprivate(ix, jx, x, y) \
    reduction(+:trnorm)
#endif
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
#if defined(__IBMC__) || defined(__ibmxl__)
        int ix[N], jx[N];
        REAL_T x[N];
        REAL_T y[N];
        memset(ix, 0, N * sizeof(int));
#endif
        int l = 0;
        // Pass 1: accumulate alpha*A into x and +A into y.
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int ind = ROWMAJOR(i, jp, N, A_M);
            int k = A_index[ind];
            if (ix[k] == 0)
            {
                x[k] = 0.0;
                ix[k] = i + 1;
                y[k] = 0.0;
                //A_index[ROWMAJOR(i, l, N, A_M)] = k;
                jx[l] = k;
                l++;
            }
            x[k] = x[k] + alpha * A_value[ind];
            y[k] = y[k] + A_value[ind];
        }
        // Pass 2: accumulate beta*B into x and -B into y.
        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int ind = ROWMAJOR(i, jp, N, B_M);
            int k = B_index[ind];
            if (ix[k] == 0)
            {
                x[k] = 0.0;
                ix[k] = i + 1;
                y[k] = 0.0;
                jx[l] = k;
                l++;
            }
            x[k] = x[k] + beta * B_value[ind];
            y[k] = y[k] - B_value[ind];
        }
        A_nnz[i] = l;
        // Compact the row, accumulate the squared difference norm, and
        // reset the scratch arrays for the next iteration.
        int ll = 0;
        for (int jp = 0; jp < l; jp++)
        {
            int jind = jx[jp];
            REAL_T xTmp = x[jind];
            trnorm += y[jind] * y[jind];
            if (is_above_threshold(xTmp, threshold))
            {
                A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp;
                A_index[ROWMAJOR(i, ll, N, A_M)] = jind;
                ll++;
            }
            x[jind] = 0.0;
            ix[jind] = 0;
            y[jind] = 0.0;
        }
        A_nnz[i] = ll;
    }
    return trnorm;
}
/** Matrix addition.
 *
 * A = A + beta * I
 *
 * Implemented by building a temporary identity matrix compatible with A
 * and delegating to bml_add_ellpack with alpha = 1.
 *
 * \ingroup add_group
 *
 * \param A Matrix A (updated in place)
 * \param beta Scalar factor multiplied by I
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_add_identity_ellpack) (
    bml_matrix_ellpack_t * A,
    double beta,
    double threshold)
{
    // Temporary identity with matching size, row width, and distribution.
    bml_matrix_ellpack_t *identity =
        TYPED_FUNC(bml_identity_matrix_ellpack) (A->N, A->M,
                                                 A->distribution_mode);
    // A = 1.0 * A + beta * identity, pruned by threshold.
    TYPED_FUNC(bml_add_ellpack) (A, identity, 1.0, beta, threshold);
    bml_deallocate_ellpack(identity);
}
/** Matrix addition.
 *
 * A = alpha * A + beta * I
 *
 * Implemented by building a temporary identity matrix compatible with A
 * and delegating to bml_add_ellpack.
 *
 * \ingroup add_group
 *
 * \param A Matrix A (updated in place)
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by I
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_scale_add_identity_ellpack) (
    bml_matrix_ellpack_t * A,
    double alpha,
    double beta,
    double threshold)
{
    // Temporary identity with matching size, row width, and distribution.
    bml_matrix_ellpack_t *identity =
        TYPED_FUNC(bml_identity_matrix_ellpack) (A->N, A->M,
                                                 A->distribution_mode);
    TYPED_FUNC(bml_add_ellpack) (A, identity, alpha, beta, threshold);
    bml_deallocate_ellpack(identity);
}
|
convolve.h | // #include <iostream>
#include "data.h"
#define INPUTIDX(OPIDX, S, idx) (OPIDX*S + idx)
#define INPUTSZ(OPSZ, S, K) (K + (OPSZ-1)*S)
// For accesses to ARRAY[idx3][idx2][idx1][idx0]
#define ARRAY(base, idx3, idx2, idx1, idx0, dim3, dim2, dim1, dim0) \
(*((base)+\
(\
( ( (dim0) * (dim1) * (dim2) ) * (idx3) ) + \
( ( (dim0) * (dim1) ) * (idx2) ) + \
( (dim0) * (idx1) ) + \
(idx0)\
)\
)\
)
// Perform a convolution given the tile data and hyper-parameters.
// input:   N input feature maps of INPUTSZ(R,S,K) x INPUTSZ(C,S,K)
// weights: N x M filter bank of K x K kernels
// output:  M output maps of R x C, accumulated into (+=) in place
// S is the stride, K the kernel size.
void ompConvolve(DATA_T * input, DATA_T * output, DATA_T * weights,
                 int M, int N, int R, int C, int S, int K)
{
    const int in_rows = INPUTSZ(R, S, K);
    const int in_cols = INPUTSZ(C, S, K);
    for (int n = 0; n < N; n++)
    {
#ifdef OMP_COMPILE
#pragma omp parallel for
#endif
        for (int m = 0; m < M; m++)
        {
            for (int row = 0; row < R; row++)
            {
                for (int col = 0; col < C; col++)
                {
                    // Dot product of the K x K input window with the filter.
                    DATA_T sum = 0;
                    for (int ki = 0; ki < K; ki++)
                    {
                        for (int kj = 0; kj < K; kj++)
                        {
                            sum +=
                                ARRAY(input, 0, n, INPUTIDX(row, S, ki), INPUTIDX(col, S, kj), 0, N, in_rows, in_cols)
                                *
                                ARRAY(weights, n, m, ki, kj, N, M, K, K);
                        }
                    }
                    ARRAY(output, 0, m, row, col, 0, M, R, C) += sum;
                }
            }
        }
    }
}
app.c | /**
* Christina Giannoula
* cgiannoula: christina.giann@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct DBCSRMatrix* A;
static struct DCSRMatrix* B;
static struct COOMatrix* C;
static val_dt* x;
static val_dt* y;
static struct partition_info_t *part_info;
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
uint32_t block_rows_per_dpu;
uint32_t prev_block_rows_dpu;
uint32_t blocks;
uint32_t blocks_pad;
uint32_t merge;
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute how many DPUs serve each vertical partition.
 * @param n total number of allocated DPUs
 * @param horz_partitions output: number of horizontal partitions
 *        (DPUs per vertical partition, integer division)
 * @param vert_partitions number of vertical partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    *horz_partitions = n / vert_partitions;
}
/**
 * @brief Fill a vector with the deterministic repeating pattern 1,2,3,4.
 * @param vec vector to initialize
 * @param size number of elements
 */
void init_vector(val_dt* vec, uint32_t size) {
    for (uint32_t idx = 0; idx < size; ++idx) {
        vec[idx] = (val_dt) ((idx % 4) + 1);
    }
}
/**
 * @brief Compute the reference SpMV output on the host CPU.
 *
 * Walks the DBCSR matrix tile by tile (r = horizontal, c = vertical
 * partition); each tile stores its own CSR-of-blocks row pointers in
 * browptr at ptr_offset, while bcolind/bval are indexed via the running
 * total_blocks offset of all previously visited tiles.
 *
 * @param y output vector (accumulated into; caller zero-initializes)
 * @param A input matrix in DBCSR format
 * @param x input vector
 */
static void spmv_host(val_dt* y, struct DBCSRMatrix *A, val_dt* x) {
    // Blocks of all tiles visited so far; offsets bcolind/bval per tile.
    uint64_t total_blocks = 0;
    for (uint32_t r = 0; r < A->horz_partitions; r++) {
        for (uint32_t c = 0; c < A->vert_partitions; c++) {
            // Row-pointer array of this tile within the concatenated browptr.
            uint32_t ptr_offset = (r * A->vert_partitions + c) * (A->num_block_rows + 1);
            for(uint64_t n=0; n < A->num_block_rows; n++) {
                for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){
                    // Block-column index within the tile.
                    uint64_t j = A->bcolind[total_blocks + i];
                    // Dense row_block_size x col_block_size block multiply.
                    for(uint64_t blr=0; blr < A->row_block_size; blr++){
                        val_dt acc = 0;
                        for(uint64_t blc=0; blc < A->col_block_size; blc++) {
                            acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[c * A->tile_width + j * A->col_block_size + blc];
                        }
                        y[r * A->tile_height + n * A->row_block_size + blr] += acc;
                    }
                }
            }
            total_blocks += A->blocks_per_partition[r * A->vert_partitions + c];
        }
    }
}
/**
 * @brief Main of the host application.
 *
 * Pipeline: read a COO matrix, convert it to a 2D-partitioned DBCSR
 * layout (one tile per DPU), pad all transfer sizes to the 8-byte MRAM
 * granularity, copy matrix + input vector to the DPUs, launch the SpMV
 * kernel, gather the per-DPU partial vectors, and merge them on the host.
 * Optionally (CHECK_CORR) verifies against the host reference spmv_host.
 */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
    unsigned int i;
    // Initialize input data: COO -> sorted COO -> DCSR -> DBCSR
    C = readCOOMatrix(p.fileName);
    sortCOOMatrix(C);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    B = coo2dcsr(C, horz_partitions, vert_partitions);
    freeCOOMatrix(C);
    A = dcsr2dbcsr(B, p.row_blsize, p.col_blsize);
    countNNZperBlockDBCSRMatrix(A);
    freeDCSRMatrix(B);
    // Initialize partition data
    part_info = partition_init(nr_of_dpus, NR_TASKLETS);
    // Initialize help data - Padding needed (MRAM transfers are 8-byte
    // aligned, so all sizes are rounded up to multiples of 8 / byte_dt)
    uint32_t ncols_pad = A->vert_partitions * A->tile_width + A->col_block_size;
    uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
    uint32_t nrows_pad = A->horz_partitions * A->tile_height + A->row_block_size;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
    if (tile_width_pad % 2 != 0)
        tile_width_pad++;
#endif
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);
    // Initialize help data
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers (all DPUs get the same transfer size)
    uint64_t max_block_rows_per_dpu = 0;
    uint64_t max_blocks_per_dpu = 0;
    // Timer for measurements
    Timer timer;
    i = 0;
    uint32_t total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for block rows and non-zero elements needed for CPU-DPU transfers
        uint64_t block_rows_per_dpu = A->num_block_rows+1;
        uint64_t prev_block_rows_dpu = 0;
        if (block_rows_per_dpu > max_block_rows_per_dpu)
            max_block_rows_per_dpu = block_rows_per_dpu;
        unsigned int blocks, blocks_pad;
        blocks = A->blocks_per_partition[i];
        // Pad block count to an even number for 8-byte transfer granularity
        if (blocks % 2 != 0)
            blocks_pad = blocks + 1;
        else
            blocks_pad = blocks;
        if (blocks_pad > max_blocks_per_dpu)
            max_blocks_per_dpu = blocks_pad;
        // Keep information per DPU
        dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
        dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
        dpu_info[i].blocks = blocks;
        dpu_info[i].blocks_pad = blocks_pad;
        // Find input arguments per DPU
        input_args[i].block_rows = block_rows_per_dpu;
        input_args[i].tcols = tile_width_pad;
        input_args[i].row_block_size = A->row_block_size;
        input_args[i].col_block_size = A->col_block_size;
        //input_args[i].blocks = blocks;
#if BLNC_TSKLT_BLOCK
        // Load-balance blocks across tasklets
        partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#endif
        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Per-tasklet block-row ranges computed by the partitioner above
            input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t];
            input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)];
        }
        total_blocks += A->blocks_per_partition[i];
    }
    // Initialization for parallel transfers
    if (max_block_rows_per_dpu % 2 != 0)
        max_block_rows_per_dpu++;
    if (max_blocks_per_dpu % 2 != 0)
        max_blocks_per_dpu++;
    // Re-allocations for padding needed
    A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bcolind = (uint32_t *) realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
    y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));
    // Count total number of bytes to be transfered in MRAM of DPU
    unsigned long int total_bytes;
    total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
    assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
    // Copy input arguments to DPUs
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        input_args[i].max_block_rows = max_block_rows_per_dpu;
        input_args[i].max_blocks = max_blocks_per_dpu;
        DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
    // Copy input matrix to DPUs.  MRAM heap layout per DPU (in order):
    // [output y][input x][browptr][bcolind][bval] — the offsets below
    // must stay consistent with this layout.
    startTimer(&timer, 0);
    // Copy Browptr
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + i * (A->num_block_rows + 1)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Bcolind
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks));
        total_blocks += A->blocks_per_partition[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Bvalues
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
        total_blocks += A->blocks_per_partition[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 0);
    // Copy input vector to DPUs (each DPU gets its vertical tile of x)
    startTimer(&timer, 1);
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i % A->vert_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 1);
    // Run kernel on DPUs
    startTimer(&timer, 2);
    DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
    stopTimer(&timer, 2);
#if LOG
    // Display DPU Log (default: disabled)
    DPU_FOREACH(dpu_set, dpu) {
        DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
    }
#endif
    // Retrieve results for output vector from DPUs
    startTimer(&timer, 3);
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 3);
    // Merge partial results to the host CPU: DPUs in the same row of the
    // 2D grid produced partial sums for the same output rows; accumulate
    // columns 1..vert_partitions-1 into column 0 of each grid row.
    startTimer(&timer, 4);
    uint32_t r, c, t;
#pragma omp parallel for num_threads(p.nthreads) shared(A, y, max_block_rows_per_dpu) private(r,c,t) collapse(2)
    for (r = 0; r < A->horz_partitions; r++) {
        for (t = 0; t < A->tile_height; t++) {
            for (c = 1; c < A->vert_partitions; c++) {
                y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t] += y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + c * max_block_rows_per_dpu * A->row_block_size + t];
            }
        }
    }
    stopTimer(&timer, 4);
    // Print timing results
    printf("\n");
    printf("Load Matrix ");
    printTimer(&timer, 0);
    printf("Load Input Vector ");
    printTimer(&timer, 1);
    printf("Kernel ");
    printTimer(&timer, 2);
    printf("Retrieve Output Vector ");
    printTimer(&timer, 3);
    printf("Merge Partial Results ");
    printTimer(&timer, 4);
    printf("\n\n");
#if CHECK_CORR
    // Check output against the host reference implementation
    val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    spmv_host(y_host, A, x);
    bool status = true;
    i = 0;
    for (uint32_t r = 0; r < A->horz_partitions; r++) {
        for (uint32_t t = 0; t < A->tile_height; t++) {
            if((r * A->tile_height + t < A->nrows) && y_host[i] != y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t]) {
                status = false;
            }
            i++;
        }
    }
    if (status) {
        printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
    } else {
        printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
    }
    free(y_host);
#endif
    // Deallocation
    freeDBCSRMatrix(A);
    free(x);
    free(y);
    partition_free(part_info);
    DPU_ASSERT(dpu_free(dpu_set));
    return 0;
}
|
cast_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jiejun@openailab.com
*/
#include "cast_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* The cast op keeps no per-node state, so the init/release/prerun hooks
 * are no-ops that always succeed. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Cast kernel: convert the input tensor's elements to the output tensor's
 * data type.  Supported pairs: fp32<->fp16, and fp32<->uint8 (the uint8
 * paths additionally require exactly one quantization parameter).
 * Returns 0 on success, -1 on shape/layout mismatch or an unsupported
 * type combination. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct cast_param* cast_param = (struct cast_param*)ir_node->op.param_mem;
    int type_from = input_tensor->data_type;
    int type_to = output_tensor->data_type;
    int num_thread = exec_graph->num_thread;
    // Element counts and ranks must agree between input and output.
    if (input_tensor->elem_num != output_tensor->elem_num || input_tensor->dim_num != output_tensor->dim_num)
    {
        return -1;
    }
    // Identical types: a raw copy is sufficient.
    if (type_from == type_to)
    {
        memcpy(output_tensor->data, input_tensor->data, input_tensor->elem_num * input_tensor->elem_size);
        return 0;
    }
    // Every dimension must match as well.
    for (uint8_t i = 0; i < input_tensor->dim_num; i++)
    {
        if (input_tensor->dims[i] != output_tensor->dims[i])
            return -1;
    }
    if (input_tensor->layout != output_tensor->layout)
    {
        return -1;
    }
    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_FP16)
    {
        fp32_t* idata = (fp32_t*)input_tensor->data;
        fp16_t* odata = (fp16_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp32_to_fp16(idata[i]);
        }
        return 0;
    }
    if (type_from == TENGINE_DT_FP16 && type_to == TENGINE_DT_FP32)
    {
        fp16_t* idata = (fp16_t*)input_tensor->data;
        fp32_t* odata = (fp32_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp16_to_fp32(idata[i]);
        }
        return 0;
    }
    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_UINT8)
    {
        float* idata = (float*)input_tensor->data;
        uint8_t* odata = (uint8_t*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                // Quantize: round to nearest, shift by zero point, then
                // saturate to the uint8 range [0, 255].
                int val = (int)(roundf(idata[i] / scale)) + zero_point;
                if (255 >= val && 0 <= val)
                    odata[i] = (uint8_t)val;
                else
                {
                    if (255 < val)
                        odata[i] = 255;
                    if (0 > val)
                        odata[i] = 0;
                }
            }
            return 0;
        }
    }
    if (type_from == TENGINE_DT_UINT8 && type_to == TENGINE_DT_FP32)
    {
        uint8_t* idata = (uint8_t*)input_tensor->data;
        float* odata = (float*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                // Dequantize: (q - zero_point) * scale.
                odata[i] = (float)(idata[i] - zero_point) * scale;
            }
            return 0;
        }
    }
    // Unsupported conversion pair (or missing quantization parameters).
    return -1;
}
/* A cast never changes geometry: the output tensor simply adopts the
 * input tensor's shape. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(graph, ir_node->output_tensors[0]);
    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Reference implementation can always run; report the baseline score so a
 * device-specific implementation wins if one is registered. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    (void)node_ops;
    (void)exec_graph;
    (void)exec_node;
    return OPS_SCORE_CANDO;
}
/* Operator vtable for the reference cast implementation. */
static struct node_ops ref_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register/unregister the reference cast op with the CPU device. */
int register_cast_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_CAST, &ref_node_ops);
}
int unregister_cast_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_CAST, &ref_node_ops);
}
|
3mm.c | /**
* 3mm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include "../../common/polybenchUtilFuncts.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
#define NI 1024
#define NJ 1024
#define NK 1024
#define NL 1024
#define NM 1024
#define GPU 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill the four input matrices with deterministic values so the CPU and
 * GPU runs operate on identical data. */
void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) {
  for (int r = 0; r < NI; r++)
    for (int c = 0; c < NK; c++)
      A[r * NK + c] = ((DATA_TYPE)r * c) / NI;

  for (int r = 0; r < NK; r++)
    for (int c = 0; c < NJ; c++)
      B[r * NJ + c] = ((DATA_TYPE)r * (c + 1)) / NJ;

  for (int r = 0; r < NJ; r++)
    for (int c = 0; c < NM; c++)
      C[r * NM + c] = ((DATA_TYPE)r * (c + 3)) / NL;

  for (int r = 0; r < NM; r++)
    for (int c = 0; c < NL; c++)
      D[r * NL + c] = ((DATA_TYPE)r * (c + 2)) / NK;
}
/* Count elements whose CPU/GPU percent difference exceeds the threshold
 * and report the total. */
void compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) {
  int mismatches = 0;
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE ref = G[r * NL + c];
      DATA_TYPE got = G_outputFromGpu[r * NL + c];
      if (percentDiff(ref, got) > PERCENT_DIFF_ERROR_THRESHOLD)
        mismatches++;
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
}
/* Reference (sequential) computation of G = (A.B).(C.D) as three GEMMs.
 * Loop order matches the GPU version so results are comparable. */
void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
             DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
  /* E := A*B */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NJ; c++) {
      DATA_TYPE sum = 0;
      for (int t = 0; t < NK; ++t)
        sum += A[r * NK + t] * B[t * NJ + c];
      E[r * NJ + c] = sum;
    }
  }
  /* F := C*D */
  for (int r = 0; r < NJ; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE sum = 0;
      for (int t = 0; t < NM; ++t)
        sum += C[r * NM + t] * D[t * NL + c];
      F[r * NL + c] = sum;
    }
  }
  /* G := E*F */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE sum = 0;
      for (int t = 0; t < NJ; ++t)
        sum += E[r * NJ + t] * F[t * NL + c];
      G[r * NL + c] = sum;
    }
  }
}
/* GPU version of the triple matrix product: three offloaded GEMMs.  Each
 * target region maps its operands to the device and its result back to the
 * host; within a region the two outer loops are collapsed and parallelized. */
void mm3_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
             DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
  int i, j, k;
/* E := A*B */
#pragma omp target device(GPU) map(to \
                                   : A[:NI * NK], B \
                                   [:NK * NJ]) map(from \
                                                   : E[:NI * NJ])
#pragma omp parallel for collapse(2)
  for (i = 0; i < NI; i++) {
    for (j = 0; j < NJ; j++) {
      E[i * NJ + j] = 0;
      for (k = 0; k < NK; ++k) {
        E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
      }
    }
  }
/* F := C*D */
#pragma omp target map(to : C[:NJ * NM], D[:NM * NL]) map(from : F[:NJ * NL])
#pragma omp parallel for collapse(2)
  for (i = 0; i < NJ; i++) {
    for (j = 0; j < NL; j++) {
      F[i * NL + j] = 0;
      for (k = 0; k < NM; ++k) {
        F[i * NL + j] += C[i * NM + k] * D[k * NL + j];
      }
    }
  }
/* G := E*F (E and F were produced by the regions above) */
#pragma omp target map(to : E[:NI * NJ], F[:NJ * NL]) map(from : G[:NI * NL])
#pragma omp parallel for collapse(2)
  for (i = 0; i < NI; i++) {
    for (j = 0; j < NL; j++) {
      G[i * NL + j] = 0;
      for (k = 0; k < NJ; ++k) {
        G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
      }
    }
  }
}
/* Benchmark driver: run the offloaded and CPU versions of the triple
 * matrix product on identical inputs, time both, and compare outputs. */
int main(int argc, char **argv) {
  double t_start, t_end;
  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *D;
  DATA_TYPE *E;
  DATA_TYPE *F;
  DATA_TYPE *G;
  DATA_TYPE *G_outputFromGpu;
  // NOTE(review): malloc results are not checked; with these sizes an
  // allocation failure would crash in init_array.
  A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(NJ * NM * sizeof(DATA_TYPE));
  D = (DATA_TYPE *)malloc(NM * NL * sizeof(DATA_TYPE));
  E = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  F = (DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE));
  G = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE));
  G_outputFromGpu = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE));
  fprintf(
      stdout,
      "<< Linear Algebra: 3 Matrix Multiplications (E=A.B; F=C.D; G=E.F) >>\n");
  init_array(A, B, C, D);
  // Timed GPU (offload) run
  t_start = rtclock();
  mm3_OMP(A, B, C, D, E, F, G_outputFromGpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
  // Timed CPU reference run (reuses E and F as scratch)
  t_start = rtclock();
  mm3_cpu(A, B, C, D, E, F, G);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  compareResults(G, G_outputFromGpu);
  free(A);
  free(B);
  free(C);
  free(D);
  free(E);
  free(F);
  free(G);
  free(G_outputFromGpu);
  return 0;
}
|
flo.c | #define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <omp.h>
#include <immintrin.h>
#define SSE_WIDTH 4
#define N_FLO_BANDS 2
#define TAG_STRING "PIEH"
/* Interleave separate u/v planes into .flo pixel order:
 * u[0], v[0], u[1], v[1], ...  SIZE is the per-plane element count. */
void construct_flo(float* flo, const float* u, const float* v, const int SIZE) {
    for (int px = 0; px < SIZE; px++) {
        float* pair = flo + (px * N_FLO_BANDS);
        pair[0] = u[px];
        pair[1] = v[px];
    }
}
/* Split interleaved .flo data back into separate u/v planes, in parallel.
 * SIZE is the per-plane element count; NUMT the OpenMP thread count. */
void get_u_v(float* u, float* v, const float* flo, const int SIZE, const int NUMT) {
    omp_set_num_threads(NUMT);
    int px;
    #pragma omp parallel for default(none) shared(SIZE, u, v, flo)
    for (px = 0; px < SIZE; px++) {
        const float* pair = flo + (px * N_FLO_BANDS);
        u[px] = pair[0];
        v[px] = pair[1];
    }
}
/* Write a Middlebury .flo file: "PIEH" tag, width, height (32-bit ints),
 * then row-major interleaved u/v float data.  On open failure a message
 * is printed and nothing is written. */
void write_flo(const float* flo, const char* filename, const int NROWS, const int NCOLS) {
    // open file
    FILE *stream = fopen(filename, "wb");
    if (stream == 0) {
        printf("filename: %s is invalid, writing file failed!\n", filename);
        // BUG FIX: the original fell through and wrote to the NULL stream.
        return;
    }
    // write header
    fputs(TAG_STRING, stream);              // write tag - "PIEH" in ASCII
    fwrite(&NCOLS, sizeof(int), 1, stream); // write width as 32 bit int
    fwrite(&NROWS, sizeof(int), 1, stream); // write row as 32 bit int
    // write data flattened - u[0,0],v[0,0],u[0,1],v[0,1].....
    int n = NCOLS * N_FLO_BANDS;
    for (int r = 0; r < NROWS; r++) {
        fwrite(&flo[r * NCOLS * N_FLO_BANDS], sizeof(float), n, stream);
    }
    // fclose flushes buffered data; report if the final write fails.
    if (fclose(stream) != 0) {
        printf("closing file: %s failed, data may be incomplete!\n", filename);
    }
}
/* Write NUMF consecutive flow fields (u/v planes stacked per frame) to
 * files named "<fbase><idx, zero-padded to 4>.flo", one frame per OpenMP
 * iteration.
 * Fixes: the _WIN32 branch passed fbase itself as the snprintf format
 * string (format-string hazard) with size F_SIZE, which dropped the last
 * character of the base path and left the suffix after an embedded NUL.
 * Both platforms now copy the full base with an explicit "%s"; the
 * "%04d" format reproduces the old if/else zero-padding exactly. */
void write_flo_MP(const float* u, const float* v, const char* fbase, const int NUMF, const int NROWS, const int NCOLS, const int START_IDX, const int NUMT) {
    // never spawn more threads than there are files to write
    if (NUMF < NUMT)
        omp_set_num_threads(NUMF);
    else
        omp_set_num_threads(NUMT);
    const int SIZE = NROWS * NCOLS;
    const int F_SIZE = (int)strlen(fbase);
    int i;
    #pragma omp parallel for default(none) shared(NUMF, SIZE, F_SIZE, START_IDX, NROWS, NCOLS, u, v, fbase)
    for (i=0; i < NUMF; i++) {
        // interleave this frame's u/v planes
        float* flo = (float*)malloc(sizeof(float) * N_FLO_BANDS * SIZE);
        construct_flo(flo, u + (i*SIZE), v + (i*SIZE), SIZE);
        // "<fbase>NNNN.flo": F_SIZE base chars + 8 suffix chars + NUL
        char* filename = (char*)malloc(sizeof(char) * (F_SIZE + 9));
        snprintf(filename, F_SIZE+1, "%s", fbase);       // full base, NUL-terminated
        const int idx = START_IDX + i;
        snprintf(filename + F_SIZE, 9, "%04d.flo", idx); // NOTE: idx >= 10000 would truncate
        write_flo(flo, filename, NROWS, NCOLS);
        free(flo);
        free(filename);
    }
}
/* Load a Middlebury .flo file into the caller-provided buffer
 * (NROWS*NCOLS*N_FLO_BANDS floats).  The dimensions stored in the file
 * must match NROWS/NCOLS or nothing is read.
 * Fixes vs. the original:
 *   - returns early when fopen fails instead of reading a NULL stream;
 *   - the 4-byte tag was compared with strcmp on a buffer with no NUL
 *     terminator (undefined behavior) and the test was inverted -- it
 *     only "worked" because trailing stack garbage made strcmp non-zero
 *     on a genuine match; we now memcmp exactly 4 bytes and proceed on
 *     an exact match;
 *   - the invalid-tag message printed the unterminated buffer with %s
 *     (UB); it now prints at most 4 chars with %.4s;
 *   - the stream is closed on every path (it was never fclosed). */
void read_flo(float* flo, const char* filename, const int NROWS, const int NCOLS) {
    FILE *stream = fopen(filename, "rb");
    if (stream == 0) {
        printf("filname: %s is invalid, writing file failed!\n", filename);
        return;
    }
    // read and verify the 4-byte header tag
    char header[4] = {0};
    if (fread(header, sizeof(char), 4, stream) == 4 &&
        memcmp(header, TAG_STRING, 4) == 0) {
        int ncols, nrows;
        fread(&ncols, sizeof(int), 1, stream);
        fread(&nrows, sizeof(int), 1, stream);
        if (nrows == NROWS && ncols == NCOLS) {
            const int N = NCOLS * N_FLO_BANDS;   // floats per row
            for (int r=0; r < NROWS; r++) {
                fread(&flo[r * N], sizeof(float), N, stream);
            }
        }
        else {
            printf("dims provided are incorrect NROWS provided: %d, nrows read: %d, NCOLS provided: %d, ncols read: %d\n", NROWS, nrows, NCOLS, ncols);
        }
    }
    else {
        printf("Invalid flo header tag: %.4s, should be: %s", header, TAG_STRING);
    }
    fclose(stream);
}
/* Produce ITERS scaled copies of one flow field: for t in [0, ITERS),
 * cpy_flo[t*SIZE .. t*SIZE+SIZE-1] = src_flo[0..SIZE-1] * (t+1).
 * The bulk of each copy is vectorized 4-wide with SSE across OpenMP
 * threads; a scalar tail loop finishes the last SIZE % SSE_WIDTH floats. */
void multiply_flo_scalers(float* cpy_flo, const float* src_flo, const int ITERS, const int NROWS, const int NCOLS, const int NUMT) {
    omp_set_num_threads(NUMT);
    const int SIZE = NROWS * NCOLS * N_FLO_BANDS;           /* floats per frame (u+v interleaved) */
    const int size_limit = (SIZE / SSE_WIDTH) * SSE_WIDTH;  /* largest multiple of 4 <= SIZE */
    //register float *p_cpy = cpy_flo;
    const register float *p_src = src_flo;
    for (int t=0; t < ITERS; t++) {
        const int i = t * SIZE;             /* output offset of copy #t */
        const float scalerF = (float)t+1;   /* scale factor 1..ITERS */
        const __m128 sf_ps = _mm_load_ps1(&scalerF);  /* broadcast scaler to all 4 lanes */
        int j;
        #pragma omp parallel for default(none) shared(size_limit, sf_ps, i, cpy_flo, p_src)
        for (j=0; j < size_limit; j+=SSE_WIDTH) {
            /* unaligned load/store: no alignment guarantee on caller buffers */
            __m128 src_ps = _mm_loadu_ps(&p_src[j]);
            _mm_storeu_ps(&cpy_flo[i+j], _mm_mul_ps(src_ps, sf_ps));
            //cpy_flo[i + j] = src_flo[j] * scalerF;
        }
        /* scalar tail: elements not covered by the 4-wide loop */
        for (int j=size_limit; j < SIZE; j++) {
            cpy_flo[i + j] = src_flo[j] * scalerF;
        }
    }
}
/* Apply an 8-bit mask to a flow grid: both the u and v band of element
   idx are multiplied by arr2[idx] (typically 0 or 1, zeroing masked-out
   vectors).  SIZE is the number of u/v pairs. */
void multiply_flo_mask_arr(float* arr1, const uint8_t* arr2, const int SIZE, const int NUMT) {
    omp_set_num_threads(NUMT);
    int idx;
    #pragma omp parallel for default(none) shared(SIZE, arr1, arr2)
    for (idx=0; idx < SIZE; idx++) {
        float* bands = &arr1[idx * N_FLO_BANDS];
        bands[0] *= arr2[idx];
        bands[1] *= arr2[idx];
    }
}
/* For i in [0, iters): scale the u/v field by (i+1), interleave it, and
 * write it to "<filename>NN.flo" (NN zero-padded to two digits).
 * Fix: the base-name copy used snprintf size F_SIZE, which drops the last
 * character of `filename` and leaves a NUL terminator *before* the suffix,
 * so every file was written to a truncated path with no ".flo" suffix at
 * all.  The copy now uses F_SIZE+1, so base (F_SIZE) + "NN.flo" (6) + NUL
 * exactly fill the F_SIZE+7 buffer. */
void multiply_flo_and_save(const float* u, const float* v, const char* filename, const int iters, const int NROWS, const int NCOLS, const int NUMT) {
    // never spawn more threads than iterations
    if (iters < NUMT)
        omp_set_num_threads(iters);
    else
        omp_set_num_threads(NUMT);
    const int SIZE = NROWS * NCOLS;
    const int F_SIZE = (int)strlen(filename);
    int i;
    //#pragma omp parallel for default(none) shared(u, v, filename, SIZE, NROWS, NCOLS, F_SIZE)
    for (i=0; i < iters; i++) {
        // create and construct the scaled, interleaved flo matrix
        float* flo = (float*)malloc(sizeof(float) * N_FLO_BANDS * SIZE);
        float scalerF = (float)i+1;
        for (int j=0; j < SIZE; j++) {
            flo[j*N_FLO_BANDS] = u[j] * scalerF;
            flo[(j*N_FLO_BANDS)+1] = v[j] * scalerF;
        }
        // build "<filename>NN.flo"; "%02d" reproduces the old 0-padded if/else
        char* filename_iter = (char*)malloc(sizeof(char) * (F_SIZE + 7));
        snprintf(filename_iter, F_SIZE+1, "%s", filename);
        snprintf(filename_iter + F_SIZE, 7, "%02d.flo", i);  // NOTE: i >= 100 would truncate
        write_flo(flo, filename_iter, NROWS, NCOLS);
        free(filename_iter);
        free(flo);
    }
}
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% John Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
/* Private state behind a WandView: a rectangular view onto the pixels of a
   MagickWand's image, with one row of PixelWands per OpenMP thread. */
struct _WandView
{
  size_t
    id;                /* unique id from AcquireWandId(), used in name */

  char
    name[MaxTextExtent],  /* "WandView-<id>", used for logging/IsWandView */
    *description;         /* label reported by the progress monitor */

  RectangleInfo
    extent;            /* region of the image this view covers */

  MagickWand
    *wand;             /* wand whose images the view reads/writes */

  CacheView
    *view;             /* pixel cache view over wand->images */

  size_t
    number_threads;    /* rows in pixel_wands; OpenMP team size */

  PixelWand
    ***pixel_wands;    /* [number_threads][extent.width] scanline wands */

  ExceptionInfo
    *exception;        /* per-view exception sink */

  MagickBooleanType
    debug;             /* log wand events when MagickTrue */

  size_t
    signature;         /* == WandSignature while the struct is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* CloneWandView(): deep-copy a wand view (new id/name, cloned cache view,
   cloned per-thread pixel wands, inherited exception).
   Fixes: (1) the clone's pixel_wands pointer table was never allocated, so
   the clone loop wrote through the NULL left by ResetMagickMemory;
   (2) the wand member was never copied, leaving clone_view->wand NULL. */
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatMagickString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->wand=wand_view->wand;
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Allocate the per-thread pointer table before cloning each row (the
    original skipped this and dereferenced a NULL pixel_wands below).
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    clone_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* Release each thread's row of pixel wands, then the pointer table itself.
   Tolerates NULL rows (partially-constructed sets); returns NULL. */
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    n;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/* DestroyWandView(): release all resources owned by a wand view and
   invalidate its signature; always returns NULL for pointer hygiene.
   Fix: the description string (allocated by ConstantString in
   NewWandView/CloneWandView/SetWandViewDescription) was leaked. */
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->signature=(~WandSignature);  /* poison against reuse */
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Walk source, duplex and destination one scanline at a time (rows are
    distributed across OpenMP threads), expose each row as per-thread
    PixelWands, invoke the user transfer callback, then sync the
    destination row back to its pixel cache.
    NOTE(review): only `source` gets NULL/signature asserts; duplex and
    destination are dereferenced unchecked -- confirm callers guarantee
    valid views.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* destination must hold per-pixel data (DirectClass) to be writable */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* an OpenMP loop cannot break early: after any failure the remaining
       iterations just fall through here */
    if (status == MagickFalse)
      continue;
    /* source row: virtual pixels, so reads outside the canvas are legal */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK keeps its black channel in the index queue */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    /* colormapped images: expose the palette index too */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    /* same treatment for the duplex (second read-only) row */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],duplex_indexes[x]);
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],duplex_indexes[x]);
    /* destination row: authentic (writable) pixels, canvas-confined */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          destination_indexes[x]);
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],destination_indexes[x]);
    /* user callback: reads source/duplex wands, writes destination wands */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (possibly modified) destination wands back to the pixels */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        destination_indexes[x]=PixelGetBlackQuantum(
          destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): the exception inherited here comes from
           source->view although the failed sync was on destination->view
           -- looks like a copy/paste slip; confirm before changing. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across the thread team and must be
           serialized */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const PixelWand *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  /*
    Report the view's pending exception: the severity is stored through
    *severity and a freshly allocated, localized message string is
    returned (empty string when no exception is pending).  The caller
    owns the returned buffer (2*MaxTextExtent chars) and must release it.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  /* message format: "<localized reason> (<localized description>)",
     either part independently optional */
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MaxTextExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* Accessor: return (by value) the rectangle this wand view covers. */
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  RectangleInfo
    region;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  region=wand_view->extent;
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Read-only scanline iteration: each row of the view is loaded into the
    calling thread's PixelWand row and handed to the user callback.
    Nothing is synced back, so any writes the callback makes to the wands
    are discarded.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* an OpenMP loop cannot break early; skip remaining rows on failure */
    if (status == MagickFalse)
      continue;
    /* virtual pixels: reads outside the canvas are legal */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK black channel and PseudoClass palette indexes both live in
       the index queue */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize it */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* Accessor: return the calling OpenMP thread's row of pixel wands.
   Each thread owns exactly one row of the view's pixel_wands table. */
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  int
    thread_id;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  thread_id=GetOpenMPThreadId();
  return(wand_view->pixel_wands[thread_id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* Accessor: the MagickWand this view was created from (not a copy). */
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);

  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/* Validate a candidate pointer as a live WandView: non-NULL, carrying the
   wand signature, and named with the "WandView" prefix. */
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
/* NewWandView(): allocate a view spanning the full canvas of the wand's
   current image, with one PixelWand row per available OpenMP thread. */
WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  view=(WandView *) AcquireMagickMemory(sizeof(*view));
  if (view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(view,0,sizeof(*view));
  /* identity used for logging and IsWandView() checks */
  view->id=AcquireWandId();
  (void) FormatMagickString(view->name,MaxTextExtent,"%s-%.20g",WandViewId,
    (double) view->id);
  view->description=ConstantString("WandView");
  /* the view covers the whole canvas (extent.x/y stay zeroed) */
  view->wand=wand;
  view->view=AcquireCacheView(view->wand->images);
  view->extent.width=wand->images->columns;
  view->extent.height=wand->images->rows;
  view->number_threads=GetOpenMPMaximumThreads();
  view->pixel_wands=AcquirePixelsThreadSet(view->extent.width,
    view->number_threads);
  view->exception=AcquireExceptionInfo();
  if (view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  view->debug=IsEventLogging();
  view->signature=WandSignature;
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/* NewWandViewExtent(): allocate a view over a caller-specified region of
   the wand's current image.
   Fix: the original called AcquireCacheView(wand_view->wand->images)
   BEFORE assigning wand_view->wand, dereferencing the NULL left by
   ResetMagickMemory; the wand member is now set first. */
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatMagickString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* wand must be set before the cache view is acquired from its images */
  wand_view->wand=wand;
  wand_view->view=AcquireCacheView(wand_view->wand->images);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->exception=AcquireExceptionInfo();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/* SetWandViewDescription(): associate a (copied) description with the view;
   the string is used by the progress monitor in the iterators.
   Fix: the previous description (allocated by ConstantString in
   NewWandView/CloneWandView) was leaked on reassignment. */
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Write-only scanline iteration: for each row the user callback fills
    the calling thread's PixelWand row, which is then synced into the
    destination image's pixel cache.  The pixels start undefined; the
    callback is expected to set every wand in the row.
  */
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* destination must hold per-pixel data (DirectClass) to be writable */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(destination->number_threads)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* an OpenMP loop cannot break early; skip remaining rows on failure */
    if (status == MagickFalse)
      continue;
    /* authentic (writable) pixels, confined to the canvas */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* callback populates destination->pixel_wands[id][0..width-1] */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy wand colors (and CMYK black channel) into the pixel cache */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        indexes[x]=PixelGetBlackQuantum(destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize it */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetWandViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
/*
  SetWandViewThreads() sets the number of threads in the view's thread team,
  clamped to the OpenMP maximum.  No-op side effects beyond the field write.
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  /* Fix: every other WandView method (SetWandViewIterator, Transfer/Update
     iterators) asserts WandSignature; MagickSignature was inconsistent. */
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  /* Clamp to what the OpenMP runtime can actually provide. */
  if (number_threads > GetOpenMPMaximumThreads())
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  TransferWandViewIterator() iterates over two wand views in parallel and
  calls the transfer callback for each scanline.  Source pixels are read
  virtually (extent may exceed canvas); destination pixels are authentic and
  synced back after the callback.

  Fixes vs. previous revision:
    - the destination pixel wands were preloaded from the SOURCE row
      (pixels/indexes) while looping over the DESTINATION width, which reads
      out of bounds when the destination extent is wider than the source and
      hands the callback the wrong initial destination values; they now load
      from destination_pixels/destination_indexes;
    - on a destination sync failure the exception was fetched from
      source->view; it now comes from destination->view;
    - destination is asserted before the first dereference.
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* Destination is written to: promote to DirectClass up front. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Load the source scanline into the per-thread source pixel wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Load the destination scanline into the destination pixel wands
       (fixed: previously read from the source row). */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          destination_indexes[x]);
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          destination_indexes[x]);
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Write the (possibly modified) destination wands back to the row. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        destination_indexes[x]=PixelGetBlackQuantum(
          destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* Fixed: report the destination view's exception, not the source's. */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator() iterates over the wand view in parallel and calls
  the update callback for each scanline; pixel wand changes made by the
  callback are synced back to the image.  Returns MagickFalse if any row
  fails; remaining iterations become no-ops once status goes false.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are written: the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A prior row failed; skip remaining work (cannot break in omp for). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load the row into this thread's pixel wands for the callback. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the callback's results back into the authentic pixel row. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        indexes[x]=PixelGetBlackQuantum(source->pixel_wands[id][x]);
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
mongodb_scram_fmt_plug.c | /*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mongodb_scram;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mongodb_scram);
#else
#include <openssl/sha.h>
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha.h"
#include "base64_convert.h"
#include "hmac_sha.h"
#include "simd-intrinsics.h"
//#undef SIMD_COEF_32
#include "pbkdf2_hmac_sha1.h"
#include "md5.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#if defined SIMD_COEF_32
#define SIMD_KEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#define FORMAT_LABEL "scram"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SCRAM PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 125
#define HASH_LENGTH 28
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(uint32_t)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#if !defined(SIMD_COEF_32)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#else
#define MIN_KEYS_PER_CRYPT SIMD_KEYS
#define MAX_KEYS_PER_CRYPT SIMD_KEYS
#endif
#define FORMAT_TAG "$scram$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define MAX_USERNAME_LENGTH 128
/* Self-test vectors: "$scram$<user>$<iterations>$<b64 salt>$<b64 StoredKey>". */
static struct fmt_tests tests[] = {
	{"$scram$someadmin$10000$wf42AF7JaU1NSeBaSmkKzw==$H6A5RF0qz6DrcWNNX4xe+wIeVEw=", "secret"},
	{"$scram$admin$10000$ouQdw5om9Uc5gxulO9F/8w==$DSnATYsgoE8InL5Petfjp8MWGh4=", "test@12345"},
	{NULL}
};

/* Per-hash salt data parsed out of the ciphertext by get_salt(). */
static struct custom_salt {
	int saltlen;      /* NOTE(review): never populated; crypt_all hard-codes 16 */
	int iterations;   /* PBKDF2 iteration count */
	char username[MAX_USERNAME_LENGTH + 1];
	unsigned char salt[18 + 1]; /* base64 decoding, 24 / 4 * 3 = 18 */
} *cur_salt;

/* Candidate plaintexts and their computed digests (allocated in init()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/*
 * Allocate the per-candidate key/digest buffers.  With OpenMP, the key
 * counts are scaled by the thread count; min is scaled by threads only,
 * max additionally by OMP_SCALE (standard JtR format pattern).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	static int omp_t = 1;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated by init().  MEM_FREE is a project macro;
   presumably it also NULLs the pointer -- confirm in memory.h. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;;
ctcopy += FORMAT_TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL) /* username */
goto err;
if (strlen(p) >= MAX_USERNAME_LENGTH)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (strlen(p)-2 != base64_valid_length(p, e_b64_mime, flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p) > 24)
goto err;
if ((p = strtokm(NULL, "")) == NULL) /* hash */
goto err;
if (strlen(p)-1 != base64_valid_length(p, e_b64_mime, flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p) > HASH_LENGTH)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *ctcopy, *keeptr, *p;
memset(&cs, 0, sizeof(cs));
ctcopy = strdup(ciphertext);
keeptr = ctcopy;;
ctcopy += FORMAT_TAG_LENGTH;
p = strtokm(ctcopy, "$");
strncpy(cs.username, p, 128);
p = strtokm(NULL, "$");
cs.iterations = atoi(p);
p = strtokm(NULL, "$");
base64_convert(p, e_b64_mime, strlen(p), (char*)cs.salt, e_b64_raw, sizeof(cs.salt), flg_Base64_NO_FLAGS, 0);
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Decode the trailing base64 field (the stored SHA-1 digest) into a static,
 * word-aligned buffer and return it.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy; /* forces word alignment of c */
	} buf;
	char *encoded;

	encoded = strrchr(ciphertext, '$') + 1;
	base64_convert(encoded, e_b64_mime, strlen(encoded), (char*)buf.c,
	               e_b64_raw, sizeof(buf.c), flg_Base64_DONOT_NULL_TERMINATE, 0);
	return buf.c;
}
/* Install the salt chosen by the cracker loop; read later by crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Write the lowercase-hex expansion of str[0..len) into out (2*len bytes,
   not NUL-terminated).  itoa16 is the project's nibble-to-hex table. */
inline static void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;

	for (i = 0; i < len; ++i) {
		out[2 * i]     = itoa16[str[i] >> 4];
		out[2 * i + 1] = itoa16[str[i] & 0xF];
	}
}
/*
 * Compute the MongoDB SCRAM verifier for every queued candidate:
 *   inner  = MD5(username ":mongo:" password)          (MongoDB-CR digest)
 *   hex    = 32-char lowercase hex of inner
 *   salted = PBKDF2-HMAC-SHA1(hex, salt, iterations)   (20 bytes)
 *   client = HMAC-SHA1(key=salted, msg="Client Key")
 *   crypt_out[i] = SHA1(client)                        (StoredKey)
 * The SIMD branch runs SIMD_KEYS candidates per PBKDF2 call.
 * Fixed: removed a stray empty "#if defined(_OPENMP) || ... / #endif" pair
 * that sat between the OpenMP pragma and its for loop.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int index;
	const int count = *pcount;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#if !defined (SIMD_COEF_32)
		SHA_CTX ctx;
		MD5_CTX mctx;
		unsigned char hexhash[32];
		unsigned char hash[16];
		unsigned char out[BINARY_SIZE];

		MD5_Init(&mctx);
		MD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));
		MD5_Update(&mctx, ":mongo:", 7);
		MD5_Update(&mctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(hash, &mctx);
		hex_encode(hash, 16, hexhash);
		/* Salt length hard-coded to 16; matches the 24-char "...==" base64
		   salts this format accepts (cur_salt->saltlen is never set). */
		pbkdf2_sha1(hexhash, 32, cur_salt->salt, 16,
		            cur_salt->iterations, out, BINARY_SIZE, 0);
		hmac_sha1(out, BINARY_SIZE, (unsigned char*)"Client Key", 10, out, BINARY_SIZE);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, out, BINARY_SIZE);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
#else
		SHA_CTX ctx;
		MD5_CTX mctx;
		int i;
		unsigned char hexhash_[SIMD_KEYS][32], *hexhash[SIMD_KEYS];
		unsigned char hash[16];
		int lens[SIMD_KEYS];
		unsigned char out_[SIMD_KEYS][BINARY_SIZE], *out[SIMD_KEYS];

		/* Build the SIMD_KEYS hex digests fed to the vectorized PBKDF2. */
		for (i = 0; i < SIMD_KEYS; ++i) {
			MD5_Init(&mctx);
			MD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));
			MD5_Update(&mctx, ":mongo:", 7);
			MD5_Update(&mctx, saved_key[index+i], strlen(saved_key[index+i]));
			MD5_Final(hash, &mctx);
			hexhash[i] = hexhash_[i];
			hex_encode(hash, 16, hexhash[i]);
			lens[i] = 32;
			out[i] = out_[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)hexhash, lens, cur_salt->salt, 16,
		                cur_salt->iterations, out, BINARY_SIZE, 0);
		for (i = 0; i < SIMD_KEYS; ++i) {
			hmac_sha1(out[i], BINARY_SIZE, (unsigned char*)"Client Key", 10, out[i], BINARY_SIZE);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, out[i], BINARY_SIZE);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
#endif
	}
	return count;
}
/* Quick reject: compare only the first ARCH_SIZE bytes (one machine word)
   of each computed digest; cmp_one() does the full BINARY_SIZE compare.
   Without OpenMP/SIMD there is exactly one candidate, so the preprocessor
   drops the loop and only index 0 is checked. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width digest comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* The binary compare in cmp_one() already covers the entire stored digest,
   so no further verification is needed. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate plaintext; strnzcpy is the project's bounded copy
   (presumably always NUL-terminates -- see misc.h). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the candidate plaintext stored by set_key(). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the JtR core (see formats.h for the
   meaning of each slot). */
struct fmt_main fmt_mongodb_scram = {
	{
		/* params: label, sizes and capability flags */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		/* methods: lifecycle, parsing, hashing and comparison hooks */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unaryop__ainv_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_uint16
// op(A') function: GB_tran__ainv_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (float) (- Ax [p]) for all anz entries, parallelized over
   nthreads.  Auto-generated; logic lives in the GB_* macros above. */
GrB_Info GB_unop__ainv_fp32_uint16
(
    float *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     /* Cx [p] = (float) (-Ax [p]) */
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast uint16_t -> float, and apply
   unary AINV.  The loop body comes from GB_unaryop_transpose.c, driven by
   the same GB_* macros (phase 2 of the two-phase transpose). */
GrB_Info GB_tran__ainv_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
/* Key element type; widen to long if TOTAL_KEYS outgrows int. */
typedef int INT_TYPE;

/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global;         /* used by full_verify to get */
                                       /* copies of rank info        */
int      passed_verification;          /* count of successful checks */

/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above    */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],      /* keys to be ranked          */
         key_buff1[SIZE_OF_BUFFERS],      /* histogram / rank work area */
         key_buff2[SIZE_OF_BUFFERS],      /* copy of keys for verify    */
         partial_verify_vals[TEST_ARRAY_SIZE];

#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
         bucket_ptrs[NUM_BUCKETS];
#endif

/**********************/
/* Partial verif info */
/**********************/
/* Per-class reference indices/ranks; the matching pair is copied into
   test_index_array/test_rank_array in main() based on CLASS. */
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
         test_rank_array[TEST_ARRAY_SIZE],

         S_test_index_array[TEST_ARRAY_SIZE] =
                             {48427,17148,23627,62548,4431},
         S_test_rank_array[TEST_ARRAY_SIZE] =
                             {0,18,346,64917,65463},

         W_test_index_array[TEST_ARRAY_SIZE] =
                             {357773,934767,875723,898999,404505},
         W_test_rank_array[TEST_ARRAY_SIZE] =
                             {1249,11698,1039987,1043896,1048018},

         A_test_index_array[TEST_ARRAY_SIZE] =
                             {2112377,662041,5336171,3642833,4250760},
         A_test_rank_array[TEST_ARRAY_SIZE] =
                             {104,17523,123928,8288932,8388264},

         B_test_index_array[TEST_ARRAY_SIZE] =
                             {41869,812306,5102857,18232239,26860214},
         B_test_rank_array[TEST_ARRAY_SIZE] =
                             {33422937,10244,59149,33135281,99},

         C_test_index_array[TEST_ARRAY_SIZE] =
                             {44172927,72999161,74326391,129606274,21736814},
         C_test_rank_array[TEST_ARRAY_SIZE] =
                             {61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
static double randlc2( double *X, double *A );
void full_verify( void );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * Portable linear congruential generator: x_{k+1} = a * x_k (mod 2^46).
 * Updates *X to the new seed and returns 2^-46 * x_{k+1} in (0, 1).
 * All arithmetic is done in exact double-precision halves (23-bit pieces)
 * so results are bit-identical on any machine with >= 48 mantissa bits.
 */
static double randlc2(double *X, double *A)
{
    static int initialized = 0;
    static double r23, r46, t23, t46;   /* 2^-23, 2^-46, 2^23, 2^46 */
    double t1, t2, t3, t4;
    double a1, a2;                      /* A = 2^23 * a1 + a2 */
    double x1, x2;                      /* X = 2^23 * x1 + x2 */
    double z;
    int k, ip;

    if (initialized == 0)
    {
        /* Build the powers of two by repeated halving/doubling so every
           intermediate is exact. */
        r23 = r46 = t23 = t46 = 1.0;
        for (k = 0; k < 23; k++)
        {
            r23 = 0.50 * r23;
            t23 = 2.0 * t23;
        }
        for (k = 0; k < 46; k++)
        {
            r46 = 0.50 * r46;
            t46 = 2.0 * t46;
        }
        initialized = 1;
    }

    /* Split A into its high and low 23-bit halves. */
    t1 = r23 * *A;
    ip = t1;              /* truncating double->int conversion */
    a1 = ip;
    a2 = *A - t23 * a1;

    /* Split X likewise, then form Z = a1*x2 + a2*x1 (mod 2^23) and
       X = 2^23*Z + a2*x2 (mod 2^46). */
    t1 = r23 * *X;
    ip = t1;
    x1 = ip;
    x2 = *X - t23 * x1;
    t1 = a1 * x2 + a2 * x1;
    ip = r23 * t1;
    t2 = ip;
    z  = t1 - t23 * t2;
    t3 = t23 * z + a2 * x2;
    ip = r46 * t3;
    t4 = ip;
    *X = t3 - t46 * t4;

    return (r46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/*****************************************************************/
/*************      C  R  E  A  T  E  _  S  E  Q      ************/
/*****************************************************************/
/* Fill key_array with NUM_KEYS pseudorandom keys in [0, MAX_KEY):
   each key is the sum of four consecutive LCG draws (giving an
   approximately Gaussian distribution) scaled by MAX_KEY/4.
   Fixed: removed unused local `j`. */
void create_seq( double seed, double a )
{
    double x;
    int    i, k;

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
        x = randlc2(&seed, &a);
        x += randlc2(&seed, &a);
        x += randlc2(&seed, &a);
        x += randlc2(&seed, &a);

        key_array[i] = k*x;
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/*****************************************************************/
/*************    F  U  L  L  _  V  E  R  I  F  Y     ************/
/*****************************************************************/
/* Scatter keys to their final positions using the rank table left by
   rank() (key_buff_ptr_global), then count adjacent inversions; zero
   inversions bumps passed_verification.
   Fixed: removed unused locals `k`, `m`, `unique_keys`. */
void full_verify()
{
    INT_TYPE    i, j;

    /* Now, finally, sort the keys: each decrement of the rank entry yields
       the next free slot for that key value. */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

    /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n",
                j );
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/*****************************************************************/
/*************             R  A  N  K             ****************/
/*****************************************************************/
/* One ranking iteration.  Called from INSIDE an OpenMP parallel region:
   the master/barrier/for/critical directives below coordinate the team.
   Each thread histograms its share of the keys into a private buffer,
   prefix-sums it, then merges it into the shared key_buff1 rank table. */
void rank( int iteration )
{

    INT_TYPE    i, j, k;
    INT_TYPE    l, m;
    /* NOTE(review): j, l, m, shift, key, min_key_val and max_key_val are
       unused here -- leftovers from the USE_BUCKETS variant. */
    INT_TYPE    shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
    INT_TYPE    key;
    INT_TYPE    min_key_val, max_key_val;
    INT_TYPE    prv_buff1[MAX_KEY];   /* per-thread private histogram */

#pragma omp master
  {
    /* Perturb two keys so each iteration ranks slightly different data. */
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

    /* Determine where the partial verify test keys are, load into  */
    /* top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

    /* Clear the work array */
    for( i=0; i<MAX_KEY; i++ )
        key_buff1[i] = 0;
  }
#pragma omp barrier
    /* Clear this thread's private histogram. */
    for (i=0; i<MAX_KEY; i++)
        prv_buff1[i] = 0;

    /* Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ ) {
        key_buff2[i] = key_array[i];

        /* Ranking of all keys occurs in this section:                 */

        /* In this section, the keys themselves are used as their
           own indexes to determine how many of each there are: their
           individual population                                       */
        prv_buff1[key_buff2[i]]++;  /* Now they have individual key   */
    }
                                       /* population                 */
    /* Local prefix sum turns counts into cumulative ranks. */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

    /* Serialized merge of each thread's partial ranks into key_buff1. */
#pragma omp critical
    {
        for( i=0; i<MAX_KEY; i++ )
            key_buff1[i] += prv_buff1[i];
    }

    /* To obtain ranks of each key, successively add the individual key
       population, not forgetting to add m, the total of lesser keys,
       to the first key population                                      */

#pragma omp barrier
#pragma omp master
  {
    /* This is the partial verify test section           */
    /* Observe that test_rank_array vals are             */
    /* shifted differently for different cases           */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];          /* test vals were put here */
        if( 0 <= k  &&  k <= NUM_KEYS-1 )
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }

    /* Make copies of rank info for use by full_verify: these variables
       in rank are local; making them global slows down the code, probably
       since they cannot be made register by compiler                        */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;

  } /* end master */
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Entry point for the NAS IS (Integer Sort) benchmark, OpenMP C version.
   Flow: load class-specific verification tables, print the NPB banner,
   generate the key sequence, run one untimed warm-up ranking pass, time
   MAX_ITERATIONS ranking passes, verify the final ordering, and print
   the standard NPB results report. */
int main(int argc, char** argv )
{
    int i, iteration, itemp;        /* NOTE(review): itemp appears unused in this function */
    int nthreads = 1;               /* stays 1 for non-OpenMP builds */
    double timecounter, maxtime;    /* NOTE(review): maxtime appears unused here */
/* Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
        case 'S':
            test_index_array[i] = S_test_index_array[i];
            test_rank_array[i] = S_test_rank_array[i];
            break;
        case 'A':
            test_index_array[i] = A_test_index_array[i];
            test_rank_array[i] = A_test_rank_array[i];
            break;
        case 'W':
            test_index_array[i] = W_test_index_array[i];
            test_rank_array[i] = W_test_rank_array[i];
            break;
        case 'B':
            test_index_array[i] = B_test_index_array[i];
            test_rank_array[i] = B_test_rank_array[i];
            break;
        case 'C':
            test_index_array[i] = C_test_index_array[i];
            test_rank_array[i] = C_test_rank_array[i];
            break;
        };
/* Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
            " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );
/* Initialize timer */
    timer_clear( 0 );
/* Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00, /* Random number gen seed */
                1220703125.00 ); /* Random number gen mult */
/* Do one interation for free (i.e., untimed) to guarantee initialization of
   all data and code pages and respective tables */
    /* Warm-up pass runs inside a parallel region: rank() partitions the
       work among the team members internally. */
#pragma omp parallel
    rank( 1 );
/* Start verification counter */
    passed_verification = 0;
    if( CLASS != 'S' ) printf( "\n iteration\n" );
/* Start timer */
    timer_start( 0 );
/* This is the main iteration */
    /* Every thread executes the full iteration loop (iteration is private);
       rank() again splits the actual sorting work across the team, and
       only the master thread prints progress / records the team size. */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
#pragma omp master
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    }
/* End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );
/* This tests that keys are in sequence: sorting of last ranked key seq
   occurs here, but is an untimed operation */
    full_verify();
/* The final printout */
    /* Expected count: 5 partial tests per timed iteration, plus 1 from
       full_verify(); anything else marks the run as failed. */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     TOTAL_KEYS,
                     0,
                     0,
                     MAX_ITERATIONS,
                     nthreads,
                     timecounter,
                     ((double) (MAX_ITERATIONS*TOTAL_KEYS))
                     /timecounter/1000000.,
                     "keys ranked",
                     passed_verification,
                     NPBVERSION,
                     COMPILETIME,
                     CC,
                     CLINK,
                     C_LIB,
                     C_INC,
                     CFLAGS,
                     CLINKFLAGS,
                     "randlc2");
    return 0;
/**************************/
} /* E N D P R O G R A M */
|
PSFHandle.h | #pragma once
#include "ps/psf/PSFunc.h"
#include "common/thread_safe_hash_map.h"
#include "param.h"
#include <algorithm>
#include <utility>
#include <mutex>
#include <omp.h>
#include <random>
#include <fstream>
namespace ps {
/**
* \brief used in ML part for sparse/dense pull, push.
* keys is used for the key of one partition.
* lens is used as the offset of the keys.
* vals is vals.
* One key (two keys for binary op) per request in Athena.
* Is it ok in a lock-free manner? By @Zhipeng
*/
// Server-side handler for matrix-valued parameters on the parameter server.
// Each serve() overload implements one PS function (dense/sparse pull, push,
// combined push-pull, and parameter init/clear/save/load). All state lives in
// `store`; reads go through `const_store` so the thread-safe map takes its
// read path, while writers const_cast the mapped value and hold the
// per-parameter write guard.
class KVServerMatrixHandle {
public:
    KVServerMatrixHandle() {}
    // Copy constructor deliberately does not copy the store: each handle
    // starts with its own empty parameter map.
    KVServerMatrixHandle(const KVServerMatrixHandle& handle) {}
    // DensePull: copy the entire parameter `k` (expected length `len`)
    // into the response.
    void serve(const PSFData<DensePull>::Request &request, PSFData<DensePull>::Response &response) {
        Key k = get<0>(request);
        size_t len = get<1>(request);
        SArray<float> &pull_vals = get<0>(response);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            auto &value_set_ = *iter->second;
            size_t data_size = value_set_.size();
            CHECK_EQ(len, data_size) << " size mismatch in DensePull " << k << " " << len << " " << data_size;
            pull_vals.resize(data_size);
            // Hold the read lock only for the copy itself.
            auto read_lock = value_set_.read_guard();
            std::copy(value_set_.begin(), value_set_.end(), pull_vals.begin());
        } else {
            LG << "Key does not exist on PS in DensePull" << k;
        }
    }
    // DensePush: element-wise accumulate `vals` into parameter `k`,
    // creating it (length `len`) on first use.
    void serve(const PSFData<DensePush>::Request &request, PSFData<DensePush>::Response &response) {
        Key k = get<0>(request);
        size_t len = get<1>(request);
        SArray<float> vals = get<2>(request);
        // NOTE(review): check-then-insert is not atomic; two concurrent
        // first pushes for the same key may race — TODO confirm the map's
        // operator[] semantics make this benign.
        if (const_store.find(k) == const_store.end()) {
            store[k] = std::make_shared<Param<float>>(len);
        }
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            CHECK_EQ(len, iter->second->size()) << k << " " << len <<" " << iter->second->size() <<" size mismatch in DensePush";
            // write, discard const qualifier
            auto &value_set_ = *const_cast<typename tmap::mapped_type&>(iter->second);
            auto write_lock = value_set_.write_guard();
            // NOTE(review): log text below says "DensePull" but this is
            // DensePush (copy-paste); message only, behavior unaffected.
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < value_set_.size(); j++)
                value_set_[j] += vals[j];
        } else {
            LG << "Key does not exist on PS in DensePull" << k;
        }
    }
    // DDPushPull: dense accumulate then dense pull in one round trip,
    // under a single write guard.
    void serve(const PSFData<DDPushPull>::Request &request, PSFData<DDPushPull>::Response &response) {
        // one key per request.
        // with response result
        Key k = get<0>(request);
        size_t len = get<1>(request);
        SArray<float> vals = get<2>(request);
        SArray<float> &pull_vals = get<0>(response);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            auto &value_set_ = *const_cast<typename tmap::mapped_type&>(iter->second);
            size_t data_size = value_set_.size();
            CHECK_EQ(len, data_size) << " size mismatch in DDPushPull " << len << " " << data_size;
            pull_vals.resize(data_size);
            auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < data_size; j++) {
                value_set_[j] += vals[j];
                pull_vals[j] = value_set_[j];
            }
        } else {
            LG << "Key does not exist on PS in DensePull" << k;
        }
    }
    // SparsePull: gather the rows listed in `offset` from a 2-D parameter
    // into the response (row-major, `width` floats per row).
    void serve(const PSFData<SparsePull>::Request &request, PSFData<SparsePull>::Response &response) {
        // we use length as the offset, i.e., #length = #vals.
        // with response result
        Key k = get<0>(request);
        SArray<size_t> offset = get<1>(request);
        SArray<float> &pull_vals = get<0>(response);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            // NOTE(review): no null check — if the stored value is not a
            // Param2D, dynamic_pointer_cast yields null and the dereference
            // crashes. TODO confirm keys are type-partitioned by callers.
            auto &value_set_ = *std::dynamic_pointer_cast<Param2D<float>>(iter->second);
            size_t width = value_set_.width;
            pull_vals.resize(offset.size() * width);
            auto read_lock = value_set_.read_guard();
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < offset.size(); ++j) {
                auto value_begin = value_set_.data() + offset[j] * width;
                auto value_end = value_begin + width;
                auto dst_begin = pull_vals.data() + j * width;
                std::copy(value_begin, value_end, dst_begin);
            }
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pulled key: " << k
               << " does not exist on PS in SparsePull.";
        }
    }
    // SparsePush: scatter-accumulate `vals` (one `width`-sized row per
    // entry in `offsets`) into the target rows of a 2-D parameter.
    void serve(const PSFData<SparsePush>::Request &request, PSFData<SparsePush>::Response &response) {
        // we use length as the offset, i.e., #length = #vals.
        // no response result
        Key k = get<0>(request);
        SArray<size_t> offsets = get<1>(request);
        SArray<float> vals = get<2>(request);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            auto &value_set_ = *std::dynamic_pointer_cast<Param2D<float>>(iter->second);
            size_t width = value_set_.width;
            CHECK_EQ(vals.size(), offsets.size() * width)
                << " in Psf::SparsePush check failed,"
                << " size of vals is " << vals.size() << " size of lens is "
                << offsets.size() << " size of width is " << width;
            // write, discard const qualifier
            auto write_lock = value_set_.write_guard();
            // NOTE(review): duplicate row ids in `offsets` would make the
            // parallel += race with itself — TODO confirm callers dedupe.
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < offsets.size(); ++j) {
                size_t src_offset = j * width;
                size_t dst_offset = offsets[j] * width;
                for (size_t k = 0; k < width; ++k) {
                    value_set_[dst_offset + k] += vals[src_offset + k];
                }
            }
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in SparsePush.";
        }
    }
    // SDPushPull: sparse scatter-accumulate (skipped when `vals` is empty)
    // followed by a dense pull of the whole parameter.
    void serve(const PSFData<SDPushPull>::Request &request, PSFData<SDPushPull>::Response &response) {
        Key k = get<0>(request);
        SArray<size_t> offsets = get<1>(request);
        SArray<float> vals = get<2>(request);
        size_t len = get<3>(request);
        SArray<float> &pull_vals = get<0>(response);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            auto &value_set_ = *std::dynamic_pointer_cast<Param2D<float>>(iter->second);
            size_t width = value_set_.width;
            CHECK_EQ(len, value_set_.size()) << " size mismatch in SDPushPull " << k << " " << len << " " << value_set_.size();
            // sparsepush phase
            if (vals.size() > 0) {
                CHECK_EQ(vals.size(), offsets.size() * width)
                    << " in Psf::SDPushPull check failed,"
                    << " size of vals is " << vals.size() << " size of lens is "
                    << offsets.size() << " size of width is " << width;
                // write, discard const qualifier
                auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
                for (size_t j = 0; j < offsets.size(); ++j) {
                    size_t src_offset = j * width;
                    size_t dst_offset = offsets[j] * width;
                    for (size_t k = 0; k < width; ++k) {
                        value_set_[dst_offset + k] += vals[src_offset + k];
                    }
                }
            }
            // densepull phase
            pull_vals.resize(value_set_.size());
            auto read_lock = value_set_.read_guard();
            std::copy(value_set_.begin(), value_set_.end(), pull_vals.begin());
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in SDPushPull.";
        }
    }
    // SSPushPull: sparse scatter-accumulate followed by a sparse gather;
    // either phase is skipped when its input array is empty.
    void serve(const PSFData<SSPushPull>::Request &request, PSFData<SSPushPull>::Response &response) {
        Key k = get<0>(request);
        SArray<size_t> push_offsets = get<1>(request);
        SArray<float> vals = get<2>(request);
        SArray<size_t> pull_offsets = get<3>(request);
        SArray<float> &pull_vals = get<0>(response);
        auto iter = const_store.find(k);
        if (iter != const_store.end()) {
            auto &value_set_ = *std::dynamic_pointer_cast<Param2D<float>>(iter->second);
            size_t width = value_set_.width;
            // sparsepush phase
            if (vals.size() > 0) {
                CHECK_EQ(vals.size(), push_offsets.size() * width)
                    << " in Psf::SSPushPull check failed,"
                    << " size of vals is " << vals.size() << " size of lens is "
                    << push_offsets.size() << " size of width is " << width;
                // write, discard const qualifier
                auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
                for (size_t j = 0; j < push_offsets.size(); ++j) {
                    size_t src_offset = j * width;
                    size_t dst_offset = push_offsets[j] * width;
                    for (size_t k = 0; k < width; ++k) {
                        value_set_[dst_offset + k] += vals[src_offset + k];
                    }
                }
            }
            // sparsepull phase
            if (pull_offsets.size() > 0) {
                pull_vals.resize(pull_offsets.size() * width);
                auto read_lock = value_set_.read_guard();
#pragma omp parallel for num_threads(4)
                for (size_t j = 0; j < pull_offsets.size(); ++j) {
                    auto val_begin = value_set_.begin() + pull_offsets[j] * width;
                    auto val_end = val_begin + width;
                    auto dst_begin = pull_vals.begin() + j * width;
                    std::copy(val_begin, val_end, dst_begin);
                }
            }
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in SparsePush.";
        }
    }
    // Cache-table embedding functions; defined out of line.
    void serve(const PSFData<kSyncEmbedding>::Request &request, PSFData<kSyncEmbedding>::Response &response);
    void serve(const PSFData<kPushEmbedding>::Request &request, PSFData<kPushEmbedding>::Response &response);
    void serve(const PSFData<kPushSyncEmbedding>::Request &request, PSFData<kPushSyncEmbedding>::Response &response);
    // ParamInit: create parameter `k` (Param/Param2D/CacheTable) if absent,
    // then fill it according to `init_type` using (init_a, init_b) as
    // constant value / uniform bounds / normal mean & stddev.
    void serve(const PSFData<ParamInit>::Request &request, PSFData<ParamInit>::Response &response) {
        // one key per request.
        // no response result
        Key k = get<0>(request);
        ParamType param_type = (ParamType)get<1>(request);
        size_t len = get<2>(request);
        size_t width = get<3>(request);
        InitType init_type = (InitType)get<4>(request);
        double init_a = get<5>(request);
        double init_b = get<6>(request);
        // std::cout << k << " " << len << " " << init_type << " " << init_a << " " << init_b << std::endl;
        Param<float>* newParam = nullptr;
        // NOTE(review): no default case — an out-of-range param_type leaves
        // newParam null and the store/size access below would crash.
        switch (param_type) {
            case kParam:
                newParam = new Param<float>(len);
                break;
            case kParam2D:
                newParam = new Param2D<float>(len, width);
                break;
            case kCacheTable:
                newParam = new CacheTable<float>(len, width);
        }
        // NOTE(review): if the key already exists, ownership of newParam is
        // unclear — presumably emplaceIfAbsent frees it; TODO confirm no leak.
        auto iter = store.emplaceIfAbsent(k, newParam);
        CHECK_EQ(len * width, iter->second->size()) << k << " " << len << " " << width << " " << iter->second->size() <<" size mismatch in UniformInit";
        // write, discard const qualifier
        auto &value_set_ = *const_cast<typename tmap::mapped_type&>(iter->second);
        auto write_lock = value_set_.write_guard();
        if (init_type == InitType::Constant) {
            float filled_value = static_cast<float>(init_a);
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < value_set_.size(); j++)
                value_set_[j] = filled_value;
        } else if (init_type == InitType::Uniform) {
            std::uniform_real_distribution<float> uniform_dist(init_a, init_b);
            // thread_local engines are seeded only on each thread's first
            // visit; later ParamInit calls on the same thread reuse the
            // already-seeded engine regardless of the new key.
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < value_set_.size(); j++) {
                thread_local std::default_random_engine generator(k*4+omp_get_thread_num());
                value_set_[j] = uniform_dist(generator);
            }
        } else if (init_type == InitType::Normal) {
            std::normal_distribution<float> normal_dist(init_a, init_b);
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < value_set_.size(); j++) {
                thread_local std::default_random_engine generator(k*4+omp_get_thread_num());
                value_set_[j] = normal_dist(generator);
            }
        } else if (init_type == InitType::TruncatedNormal) {
            std::normal_distribution<float> truncated_normal_dist(init_a, init_b);
            // Resample until the draw falls within two standard deviations
            // of the mean (standard truncated-normal initialization).
            float upper_limit = init_a + 2 * init_b;
            float lower_limit = init_a - 2 * init_b;
#pragma omp parallel for num_threads(4)
            for (size_t j = 0; j < value_set_.size(); j++) {
                thread_local std::default_random_engine generator(k*4+omp_get_thread_num());
                float temp = truncated_normal_dist(generator);
                while (temp > upper_limit || temp < lower_limit) temp = truncated_normal_dist(generator);
                value_set_[j] = temp;
            }
        }
    }
    // ParamClear: drop parameter `k` from the store entirely.
    void serve(const PSFData<ParamClear>::Request &request, PSFData<ParamClear>::Response &response) {
        Key k = get<0>(request);
        auto iter = store.find(k);
        if (iter != store.end()) {
            store.erase(iter);
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in ParamClear.";
        }
    }
    // ParamSave: dump parameter `k`'s raw float data to the file path
    // carried in `address`. I/O success is not checked.
    void serve(const PSFData<ParamSave>::Request &request, PSFData<ParamSave>::Response &response) {
        Key k = get<0>(request);
        SArray<char> address = get<1>(request);
        auto iter = store.find(k);
        if (iter != store.end()) {
            auto &value_set_ = *iter->second;
            auto read_lock = value_set_.read_guard();
            std::ofstream fout(std::string(address.data(), address.size()).c_str(), std::ios::binary);
            fout.write((char*)value_set_.data(), value_set_.size() * sizeof(float));
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in ParamSave.";
        }
    }
    // ParamLoad: overwrite parameter `k` with raw float data read from the
    // file path in `address`. I/O success is not checked.
    void serve(const PSFData<ParamLoad>::Request &request, PSFData<ParamLoad>::Response &response) {
        Key k = get<0>(request);
        SArray<char> address = get<1>(request);
        auto iter = store.find(k);
        if (iter != store.end()) {
            auto &value_set_ = *iter->second;
            auto write_lock = value_set_.write_guard();
            std::ifstream fin(std::string(address.data(), address.size()).c_str(), std::ios::binary);
            fin.read((char*)value_set_.data(), value_set_.size() * sizeof(float));
        } else {
            // error, the key does not exist on PS.
            LF << "[Error] The pushed key: " << k
               << " does not exist on PS in ParamLoad.";
        }
    }
private:
    typedef threadsafe_unordered_map<Key, std::shared_ptr<Param<float>>> tmap;
    tmap store;
    const tmap& const_store = store; // const reference to force compiler to use read lock
};
} // namespace ps
|
interpolate_v2_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
// Reads a list of 1-element shape tensors and returns their scalar values as
// a vector<int>. Each tensor must have shape [1]; GPU-resident tensors are
// staged through a CPU copy before being read.
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  // get tensor from
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    // BUGFIX: the original message used the garbled placeholder "d%.";
    // use %s so the DDim argument is formatted correctly.
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1],"
                          "but received %s.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      // Device memory cannot be dereferenced on the host; copy first.
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
std::vector<T> vec_new_data;
auto* new_data = new_data_tensor->data<T>();
framework::Tensor cpu_starts_tensor;
if (platform::is_gpu_place(new_data_tensor->place())) {
TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
new_data = cpu_starts_tensor.data<T>();
}
vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
return vec_new_data;
}
// Decomposes `dims` into (N, C, D, H, W) according to `data_layout`,
// padding absent spatial dimensions with 1 (3-D input -> D=H=1; 4-D -> D=1).
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  const bool nchw = (data_layout == DataLayout::kNCHW);
  *N = dims[0];
  switch (dims.size()) {
    case 3:  // (N, C, W) or (N, W, C)
      *C = nchw ? dims[1] : dims[2];
      *D = 1;
      *H = 1;
      *W = nchw ? dims[2] : dims[1];
      break;
    case 4:  // (N, C, H, W) or (N, H, W, C)
      *C = nchw ? dims[1] : dims[3];
      *D = 1;
      *H = nchw ? dims[2] : dims[1];
      *W = nchw ? dims[3] : dims[2];
      break;
    default:  // 5-D: (N, C, D, H, W) or (N, D, H, W, C)
      *C = nchw ? dims[1] : dims[4];
      *D = nchw ? dims[2] : dims[1];
      *H = nchw ? dims[3] : dims[2];
      *W = nchw ? dims[4] : dims[3];
      break;
  }
}
// Nearest-neighbor 2-D resize: each output pixel (k, l) copies the source
// pixel whose index is ratio * k (rounded when align_corners is set).
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto src = EigenTensor<T, 4>::From(input);
  auto dst = EigenTensor<T, 4>::From(*output);
  const bool nchw = (data_layout == DataLayout::kNCHW);
  for (int k = 0; k < out_h; k++) {  // output rows
    const int src_h = align_corners ? static_cast<int>(ratio_h * k + 0.5)
                                    : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {  // output columns
      const int src_w = align_corners ? static_cast<int>(ratio_w * l + 0.5)
                                      : static_cast<int>(ratio_w * l);
      for (int i = 0; i < n; i++) {    // batches
        for (int j = 0; j < c; j++) {  // channels
          if (nchw) {
            dst(i, j, k, l) = src(i, j, src_h, src_w);
          } else {
            dst(i, k, l, j) = src(i, src_h, src_w, j);
          }
        }
      }
    }
  }
}
// 1-D linear interpolation along the W axis of a (N, C, W) / (N, W, C)
// tensor. Precomputes per-output-column source indices (west/east) and
// blend weights, then fills the output in a parallel triple loop.
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
                                const float ratio_w, const int in_w,
                                const int n, const int c, const int out_w,
                                const bool align_corners, const bool align_mode,
                                const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 3>::From(input);
  auto output_t = EigenTensor<T, 3>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUGFIX: these tables are written via operator[] below, so they need a
  // real size. reserve() leaves size() == 0, making vx_w[l] an
  // out-of-bounds write (undefined behavior); resize() is required.
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;                       // w: left source column
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // w_id: right source column
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
  for (int i = 0; i < n; i++) {    // loop for batches
    for (int j = 0; j < c; j++) {  // loop for channels
      for (int l = 0; l < out_w; l++) {
        // linear interpolation: blend the two nearest source columns
        T out_t;
        if (data_layout == DataLayout::kNCHW) {
          out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
                  input_t(i, j, vx_e[l]) * vd_w[l];
          output_t(i, j, l) = out_t;
        } else {
          out_t = input_t(i, vx_w[l], j) * vd_e[l] +
                  input_t(i, vx_e[l], j) * vd_w[l];
          output_t(i, l, j) = out_t;
        }
      }
    }
  }
}
// Backward pass of 1-D linear interpolation: scatters each output-gradient
// element back to its two source columns, weighted by the same lambdas the
// forward pass used.
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
                                    Tensor* input_grad, const float ratio_w,
                                    const int in_w, const int n, const int c,
                                    const int out_w, const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  auto din = EigenTensor<T, 3>::From(*input_grad);
  auto dout = EigenTensor<T, 3>::From(output_grad);
  const bool half_pixel = (align_mode == 0 && !align_corners);
  for (int l = 0; l < out_w; l++) {
    int west = half_pixel ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                          : static_cast<int>(ratio_w * l);
    west = (west > 0) ? west : 0;                             // left column
    const int east = (west < (in_w - 1)) ? (west + 1) : west;  // right column
    float src_x = ratio_w * (l + 0.5) - 0.5;
    src_x = (src_x > 0) ? src_x : 0;
    const float lambda_w = half_pixel ? src_x - west : ratio_w * l - west;
    const float lambda_e = 1.f - lambda_w;
    for (int i = 0; i < n; i++) {    // batches
      for (int j = 0; j < c; j++) {  // channels
        if (data_layout == DataLayout::kNCHW) {
          const T g = dout(i, j, l);
          din(i, j, west) += static_cast<T>(g * lambda_e);
          din(i, j, east) += static_cast<T>(g * lambda_w);
        } else {
          const T g = dout(i, l, j);
          din(i, west, j) += static_cast<T>(g * lambda_e);
          din(i, east, j) += static_cast<T>(g * lambda_w);
        }
      }
    }
  }
}
// Bilinear 2-D interpolation. Per-row (north/south) and per-column
// (west/east) source indices and blend weights are precomputed, then the
// output is filled by a parallel quadruple loop.
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
                                  const float ratio_h, const float ratio_w,
                                  const int in_h, const int in_w, const int n,
                                  const int c, const int out_h, const int out_w,
                                  const bool align_corners,
                                  const bool align_mode,
                                  const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  // BUGFIX: the tables are written via operator[]; reserve() leaves
  // size() == 0 so those writes are out of bounds (UB). resize() gives the
  // vectors real elements.
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUGFIX: same reserve() -> resize() fix for the column tables.
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
  for (int i = 0; i < n; i++) {        // loop for batches
    for (int j = 0; j < c; j++) {      // loop for channels
      for (int k = 0; k < out_h; k++) {  // loop for images
        for (int l = 0; l < out_w; l++) {
          // bilinear interpolation: weighted sum of the 4 surrounding pixels
          T out_t;
          if (data_layout == DataLayout::kNCHW) {
            out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
                    input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
                    input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
                    input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
            output_t(i, j, k, l) = out_t;
          } else {
            out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
                    input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
                    input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
                    input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
            output_t(i, k, l, j) = out_t;
          }
        }
      }
    }
  }
}
// Trilinear 3-D interpolation. Per-depth (front/back), per-row
// (north/south) and per-column (west/east) source indices and weights are
// precomputed, then the output is an 8-tap weighted sum per voxel.
template <typename T>
static void TrilinearInterpolation(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const bool align_mode,
    const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vt_f, vt_b;
  std::vector<float> vd_f, vd_b;
  // BUGFIX: all index/weight tables below are written via operator[];
  // reserve() leaves size() == 0, so those writes were out of bounds (UB).
  // resize() allocates real elements.
  vt_f.resize(out_d);
  vt_b.resize(out_d);
  vd_f.resize(out_d);
  vd_b.resize(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int j = 0; j < out_d; j++) {
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    vt_f[j] = t_f;
    vt_b[j] = t_b;
    vd_f[j] = d_f;
    vd_b[j] = d_b;
  }
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  vy_n.resize(out_h);  // BUGFIX: resize, not reserve (see above)
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);  // BUGFIX: resize, not reserve (see above)
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
  for (int b = 0; b < n; b++) {          // loop for batches
    for (int i = 0; i < c; i++) {        // loop for channels
      for (int j = 0; j < out_d; j++) {  // loop for D, H, W
        for (int k = 0; k < out_h; k++) {
          for (int l = 0; l < out_w; l++) {
            // trilinear interpolation: 8-corner weighted sum
            if (data_layout == DataLayout::kNCHW) {
              T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, i, j, k, l) = out_t;
            } else {
              T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, j, k, l, i) = out_t;
            }
          }
        }
      }
    }
  }
}
// Cubic convolution kernel for the near interval (|x| <= 1):
// ((A+2)|x| - (A+3)) * x^2 + 1, with spline coefficient A (here A = -0.75
// at the call site).
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
  return ((A + 2) * x - (A + 3)) * x * x + 1;
}
// Cubic convolution kernel for the far interval (1 < |x| < 2):
// ((A*x - 5A) * x + 8A) * x - 4A, with spline coefficient A.
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
  return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
// Computes the 4 bicubic tap weights for a fractional offset t in [0, 1):
// coeffs[i] weights the sample at relative position i - 1 (i.e. taps at
// -1, 0, +1, +2). Uses A = -0.75, the common choice for bicubic resize.
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  T A = -0.75;
  T x1 = t;
  // taps at distance 1+t and t from the sample point
  coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(x1, A);
  // opposite coefficients
  // taps at distance 1-t and 2-t (mirror side)
  T x2 = 1.0 - t;
  coeffs[2] = cubic_convolution1<T>(x2, A);
  coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}
// 1-D cubic interpolation of four consecutive samples (x0..x3) at
// fractional position t within the middle interval.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T w[4];
  get_cubic_upsample_coefficients<T>(w, t);
  return x0 * w[0] + x1 * w[1] + x2 * w[2] + x3 * w[3];
}
// Bicubic 2-D interpolation: for each output pixel, interpolate 4 rows of
// 4 clamped source taps in x, then interpolate those 4 results in y.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
                                 const float ratio_h, const float ratio_w,
                                 const int in_h, const int in_w, const int n,
                                 const int c, const int out_h, const int out_w,
                                 const bool align_corners,
                                 const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    const T y_t = y_n - input_y;  // fractional y offset
    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      const T x_t = x_n - input_x;  // fractional x offset
      // PERF: the four clamped x taps depend only on input_x and in_w, so
      // compute them once per output column instead of recomputing them
      // inside the batch/channel/row loops as the original code did.
      int access_x_0 =
          std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
      int access_x_1 =
          std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
      int access_x_2 =
          std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
      int access_x_3 =
          std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          T coefficients[4];
          // interp 4 times in x direction
          for (int ii = 0; ii < 4; ii++) {
            int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
                                    static_cast<int>(0));
            if (data_layout == DataLayout::kNCHW) {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, j, access_y, access_x_0),
                                  input_t(i, j, access_y, access_x_1),
                                  input_t(i, j, access_y, access_x_2),
                                  input_t(i, j, access_y, access_x_3), x_t);
            } else {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, access_y, access_x_0, j),
                                  input_t(i, access_y, access_x_1, j),
                                  input_t(i, access_y, access_x_2, j),
                                  input_t(i, access_y, access_x_3, j), x_t);
            }
          }
          // interp y direction
          if (data_layout == DataLayout::kNCHW) {
            output_t(i, j, k, l) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          } else {
            output_t(i, k, l, j) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          }
        }
      }
    }
  }
}
// Backward pass of nearest-neighbor resize: each output-gradient element is
// accumulated into the single source cell it was copied from in the forward
// pass.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto din = EigenTensor<T, 4>::From(*input_grad);
  auto dout = EigenTensor<T, 4>::From(output_grad);
  const bool nchw = (data_layout == DataLayout::kNCHW);
  for (int k = 0; k < out_h; k++) {  // output rows
    const int src_h = align_corners ? static_cast<int>(ratio_h * k + 0.5)
                                    : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {  // output columns
      const int src_w = align_corners ? static_cast<int>(ratio_w * l + 0.5)
                                      : static_cast<int>(ratio_w * l);
      for (int i = 0; i < n; i++) {    // batches
        for (int j = 0; j < c; j++) {  // channels
          if (nchw) {
            din(i, j, src_h, src_w) += dout(i, j, k, l);
          } else {
            din(i, src_h, src_w, j) += dout(i, k, l, j);
          }
        }
      }
    }
  }
}
// BilinearInterpolationGrad: backward of 2-D bilinear resize.  Each output
// gradient element is scattered onto the four surrounding input pixels with
// the same weights (d_n/d_s in y, d_w/d_e in x) the forward pass used.
// input_grad must be zero-filled by the caller (see Interpolate2DCPUBwd).
template <typename T>
static void BilinearInterpolationGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
const float ratio_w, const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w, const bool align_corners,
const int align_mode, const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
// align_mode == 0 without align_corners selects the half-pixel mapping.
bool align_flag = (align_mode == 0 && !align_corners);
for (int k = 0; k < out_h; k++) { // loop for images
// y_n / y_s: north (upper) and south (lower) source rows, clamped to range.
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
// d_n / d_s: vertical weights (fractional distance to the two rows).
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) {
// x_w / x_e: west (left) and east (right) source columns, clamped.
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
// d_w / d_e: horizontal weights.
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// bilinear interpolation grad
// Distribute grad to the 4 corners; weights sum to 1, so the total
// gradient mass is conserved.
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(i, j, k, l);
input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
} else {
const T grad = output_grad_t(i, k, l, j);
input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
}
}
}
}
}
}
// TrilinearInterpolationGrad: backward of 3-D trilinear resize.  Each output
// gradient element is scattered onto the eight surrounding input voxels with
// the forward pass's weights (d_f/d_b in depth, d_n/d_s in height, d_w/d_e in
// width).  input_grad must be zero-filled by the caller (Interpolate3DCPUBwd).
template <typename T>
static void TrilinearInterpolationGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
const float ratio_h, const float ratio_w, const int in_d, const int in_h,
const int in_w, const int n, const int c, const int out_d, const int out_h,
const int out_w, const bool align_corners, const int align_mode,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
// align_mode == 0 without align_corners selects the half-pixel mapping.
bool align_flag = (align_mode == 0 && !align_corners);
for (int j = 0; j < out_d; j++) { // loop for D
// t_f / t_b: front and back source slices, clamped to [0, in_d-1].
int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
: static_cast<int>(ratio_d * j);
t_f = (t_f > 0) ? t_f : 0;
int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
float idx_src_t = ratio_d * (j + 0.5) - 0.5;
idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
// d_f / d_b: depth weights (fractional distance to front/back slices).
float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
float d_b = 1.f - d_f;
for (int k = 0; k < out_h; k++) { // loop for H
// y_n / y_s: north/south source rows, clamped.
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) { // loop for W
// x_w / x_e: west/east source columns, clamped.
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int b = 0; b < n; b++) { // loop for batches
for (int i = 0; i < c; i++) { // loop for channels
// trilinear interpolation grad
// Distribute grad to the 8 corner voxels; the 8 weights sum to 1.
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(b, i, j, k, l);
input_grad_t(b, i, t_f, y_n, x_w) +=
static_cast<T>(grad * d_b * d_s * d_e);
input_grad_t(b, i, t_f, y_n, x_e) +=
static_cast<T>(grad * d_b * d_s * d_w);
input_grad_t(b, i, t_f, y_s, x_w) +=
static_cast<T>(grad * d_b * d_n * d_e);
input_grad_t(b, i, t_f, y_s, x_e) +=
static_cast<T>(grad * d_b * d_n * d_w);
input_grad_t(b, i, t_b, y_n, x_w) +=
static_cast<T>(grad * d_f * d_s * d_e);
input_grad_t(b, i, t_b, y_n, x_e) +=
static_cast<T>(grad * d_f * d_s * d_w);
input_grad_t(b, i, t_b, y_s, x_w) +=
static_cast<T>(grad * d_f * d_n * d_e);
input_grad_t(b, i, t_b, y_s, x_e) +=
static_cast<T>(grad * d_f * d_n * d_w);
} else {
const T grad = output_grad_t(b, j, k, l, i);
input_grad_t(b, t_f, y_n, x_w, i) +=
static_cast<T>(grad * d_b * d_s * d_e);
input_grad_t(b, t_f, y_n, x_e, i) +=
static_cast<T>(grad * d_b * d_s * d_w);
input_grad_t(b, t_f, y_s, x_w, i) +=
static_cast<T>(grad * d_b * d_n * d_e);
input_grad_t(b, t_f, y_s, x_e, i) +=
static_cast<T>(grad * d_b * d_n * d_w);
input_grad_t(b, t_b, y_n, x_w, i) +=
static_cast<T>(grad * d_f * d_s * d_e);
input_grad_t(b, t_b, y_n, x_e, i) +=
static_cast<T>(grad * d_f * d_s * d_w);
input_grad_t(b, t_b, y_s, x_w, i) +=
static_cast<T>(grad * d_f * d_n * d_e);
input_grad_t(b, t_b, y_s, x_e, i) +=
static_cast<T>(grad * d_f * d_n * d_w);
}
}
}
}
}
}
}
// BicubicInterpolationGrad: backward of 2-D bicubic resize.  Each output
// gradient element is scattered onto a 4x4 neighbourhood of input pixels,
// weighted by the separable cubic coefficients used in the forward pass.
// input_grad must be zero-filled by the caller (see Interpolate2DCPUBwd).
//
// Fix: use std::floor instead of floorf so the source coordinate is not
// truncated through float when T is double (floorf takes/returns float).
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // Source y coordinate: half-pixel offset unless align_corners.
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = static_cast<int>(std::floor(y_n));
    T y_t = y_n - input_y;  // fractional part in [0, 1)
    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = static_cast<int>(std::floor(x_n));
      T x_t = x_n - input_x;
      // Separable cubic weights for the 4 taps in each direction.
      T x_coeffs[4];
      T y_coeffs[4];
      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad: scatter over the 4x4 window,
          // clamping indices at the image border (border replication).
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}
// Forward CPU kernel for 1-D ("linear") interpolation.
// Output width is resolved in priority order: SizeTensor input, OutSize
// input, Scale input / scale attribute, then the out_w attribute; the
// width ratio is then computed and LinearInterpolation is dispatched.
//
// Fix: error messages used %d for the float scale_w — changed to %f.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1.;  // sentinel: "no scale provided"
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: it wins over every other size source
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale_w = scale_data[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize overrides any scale-derived width.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_w = out_size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_w = 0.f;
  if (out_w > 1) {
    // Prefer the user-supplied scale over the shape-derived ratio.
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
                           align_corners, align_mode, data_layout);
  }
}
// Forward CPU kernel for 2-D interpolation (bilinear / nearest / bicubic).
// Output H/W are resolved in priority order: SizeTensor input, OutSize
// input, Scale input / scale attribute, then the out_h/out_w attributes.
//
// Fix: error messages used %d for the float scales — changed to %f.
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;  // sentinels: "no scale provided"
  float scale_w = -1;
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: it wins over every other size source
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        // A single scale value applies to both dimensions.
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_h = scale[0];
        scale_w = scale[1];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
      }
    }
    if (scale_h > 0. && scale_w > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize overrides any scale-derived shape.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_h = out_size_data[0];
      out_w = out_size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    // Prefer the user-supplied scale over the shape-derived ratio.
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                             out_h, out_w, align_corners, align_mode,
                             data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
                                  out_w, align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                            out_h, out_w, align_corners, data_layout);
  }
}
// Forward CPU kernel for 3-D ("trilinear") interpolation.
// Output D/H/W are resolved in priority order: SizeTensor input, OutSize
// input, Scale input / scale attribute, then the out_d/out_h/out_w attrs.
//
// Fix: error messages used %d for the float scales — changed to %f.
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;  // sentinels: "no scale provided"
  float scale_h = -1;
  float scale_w = -1;
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: it wins over every other size source
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_d = scale_data[0];
        scale_h = scale_data[1];
        scale_w = scale_data[2];
      } else {
        // A single scale value applies to all three dimensions.
        scale_d = scale_data[0];
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    } else {
      if (scale.size() > 1) {
        scale_d = scale[0];
        scale_h = scale[1];
        scale_w = scale[2];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
        PADDLE_ENFORCE_EQ(
            scale_d > 0, true,
            platform::errors::InvalidArgument(
                "The scale_d in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_d));
      }
    }
    if (scale_w > 0. && scale_h > 0. && scale_d > 0.) {
      out_d = static_cast<int>(in_d * scale_d);
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize overrides any scale-derived shape.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_d = out_size_data[0];
      out_h = out_size_data[1];
      out_w = out_size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
                                  "out_d in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    // Prefer the user-supplied scale over the shape-derived ratio.
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
                              in_h, in_w, n, c, out_d, out_h, out_w,
                              align_corners, align_mode, data_layout);
  }
}
// Backward CPU kernel for 1-D interpolation.  Re-derives out_w with the
// same priority rules as the forward pass (SizeTensor > OutSize > Scale >
// attribute), zero-fills input_grad, then scatters the output gradients.
//
// Fix: error messages used %d for the float scale_w — changed to %f.
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  float scale_w = -1.0;  // sentinel: "no scale provided"
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale_w = scale_data[0];
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
  } else {
    if (scale.size() > 0) {
      scale_w = scale[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    }
  }
  if (scale_w > 0.) {
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides everything.
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_w = out_size_data[0];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
                               out_w, align_corners, align_mode, data_layout);
  }
}
// Backward CPU kernel for 2-D interpolation (bilinear / nearest / bicubic).
// Re-derives out_h/out_w with the same priority rules as the forward pass,
// zero-fills input_grad, then scatters the output gradients.
//
// Fix: error messages used %d for the float scales — changed to %f.
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;  // sentinels: "no scale provided"
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_h = scale_data[0];
      scale_w = scale_data[1];
    } else {
      // A single scale value applies to both dimensions.
      scale_w = scale_data[0];
      scale_h = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_h));
  } else {
    if (scale.size() > 1) {
      scale_h = scale[0];
      scale_w = scale[1];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
    }
  }
  if (scale_h > 0. && scale_w > 0.) {
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides everything.
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_h = out_size_data[0];
    out_w = out_size_data[1];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_h, in_w};
  } else {
    dim_grad = {n, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w, align_corners,
                                 align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                      n, c, out_h, out_w, align_corners,
                                      data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h,
                                in_w, n, c, out_h, out_w, align_corners,
                                data_layout);
  }
}
// Backward CPU kernel for 3-D ("trilinear") interpolation.  Re-derives
// out_d/out_h/out_w with the same priority rules as the forward pass,
// zero-fills input_grad, then scatters the output gradients.
//
// Fixes: output_grad was taken by value (copying the Tensor holder on every
// call) while the 1-D/2-D Bwd kernels take it by const reference — changed
// to const Tensor& for consistency; error messages used %d for the float
// scales — changed to %f.
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;  // sentinels: "no scale provided"
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_d = scale_data[0];
      scale_h = scale_data[1];
      scale_w = scale_data[2];
    } else {
      // A single scale value applies to all three dimensions.
      scale_d = scale_data[0];
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_h));
    PADDLE_ENFORCE_EQ(
        scale_d > 0, true,
        platform::errors::InvalidArgument(
            "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_d));
  } else {
    if (scale.size() > 1) {
      scale_d = scale[0];
      scale_h = scale[1];
      scale_w = scale[2];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    }
  }
  if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
    out_d = static_cast<int>(in_d * scale_d);
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides everything.
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_d = out_size_data[0];
    out_h = out_size_data[1];
    out_w = out_size_data[2];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_d, in_h, in_w};
  } else {
    dim_grad = {n, in_d, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolationGrad<T>(
        output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n,
        c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
  }
}
// Forward kernel entry point: dispatches to the 1-D / 2-D / 3-D CPU
// implementation based on the rank of the input tensor
// (3 -> 1-D, 4 -> 2-D, 5 -> 3-D; other ranks are ignored, as before).
template <typename T>
class InterpolateV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    switch (input->dims().size()) {
      case 3:  // 1D interpolation
        Interpolate1DCPUFwd<T>(ctx, *input, output);
        break;
      case 4:  // 2D interpolation
        Interpolate2DCPUFwd<T>(ctx, *input, output);
        break;
      case 5:  // 3D interpolation
        Interpolate3DCPUFwd<T>(ctx, *input, output);
        break;
      default:
        break;
    }
  }
};
// Backward kernel entry point: dispatches to the 1-D / 2-D / 3-D CPU
// gradient implementation based on the rank of the output gradient
// (3 -> 1-D, 4 -> 2-D, 5 -> 3-D; other ranks are ignored, as before).
template <typename T>
class InterpolateV2GradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    switch (output_grad->dims().size()) {
      case 3:  // 1D interpolation grad
        Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 4:  // 2D interpolation grad
        Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 5:  // 3D interpolation grad
        Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      default:
        break;
    }
  }
};
} // namespace operators
} // namespace paddle
|
GB_AxB_saxpy3_slice_balanced.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.
#include "GB_AxB_saxpy3.h"
// control parameters for generating parallel tasks
// GB_NTASKS_PER_THREAD: # of initial coarse tasks created per thread
#define GB_NTASKS_PER_THREAD 2
// GB_COSTLY: a task or vector with flops above GB_COSTLY times the target
// task size is considered costly and is split further
#define GB_COSTLY 1.2
// GB_FINE_WORK: fine tasks target (target_task_size / GB_FINE_WORK) flops
#define GB_FINE_WORK 2
// GB_MWORK_ALPHA / GB_MWORK_BETA: thresholds comparing the A*B flop count
// against the work required for the mask M (see the main function below)
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10
// free all workspace, but keep the SaxpyTasks result
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Fine_fl, int64_t) ; \
GB_WERK_POP (Fine_slice, int64_t) ; \
GB_WERK_POP (Coarse_Work, int64_t) ; \
GB_WERK_POP (Coarse_initial, int64_t) ; \
}
// free all workspace and the SaxpyTasks array itself (error paths)
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE_WERK (&SaxpyTasks, SaxpyTasks_size) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically. AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods. However, if Hash is selected but the hash table
// equals or exceeds cvlen, then Gustavson's method is used instead.
static inline int64_t GB_hash_table_size
(
    int64_t flmax,      // max flop count for any vector computed by this task
    int64_t cvlen,      // vector length of C
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{
    // A returned size of exactly cvlen means "use Gustavson's method"; the
    // dense Gustavson workspace has one entry per row of C.
    if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
    {
        // Gustavson was requested explicitly, or flmax is so large that a
        // hash table would be no smaller than the dense workspace.
        return (cvlen) ;
    }
    // flmax is small: hash_size = 2 * (smallest power of 2 >= flmax)
    int64_t hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;
    bool use_Gustavson ;
    if (AxB_method == GxB_AxB_HASH)
    {
        // Hash requested: honor it unless the hash table would be at least
        // as large as the dense Gustavson workspace.
        use_Gustavson = (hash_size >= cvlen) ;
    }
    else
    {
        // automatic selection: switch to Gustavson when the hash table is
        // not substantially smaller than cvlen
        use_Gustavson = (hash_size >= cvlen/12) ;
    }
    return (use_Gustavson ? cvlen : hash_size) ;
}
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
// Build one coarse task covering vectors kfirst:klast of B. Finds the max
// flop count of any single vector in the range (a parallel max-reduction
// over Bflops), sizes the task's hash table from it, and fills in the
// SaxpyTasks[taskid] entry. vector == -1 marks the task as coarse.
static inline void GB_create_coarse_task
(
int64_t kfirst, // coarse task consists of vectors kfirst:klast
int64_t klast,
GB_saxpy3task_struct *SaxpyTasks,
int taskid, // taskid for this coarse task
int64_t *Bflops, // size bnvec; cum sum of flop counts for vectors of B
int64_t cvlen, // vector length of B and C
double chunk,
int nthreads_max,
int64_t *Coarse_Work, // workspace for parallel reduction for flop count
const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
//--------------------------------------------------------------------------
// find the max # of flops for any vector in this task
//--------------------------------------------------------------------------
int64_t nk = klast - kfirst + 1 ;
int nth = GB_nthreads (nk, chunk, nthreads_max) ;
// each thread finds the max flop count for a subset of the vectors
int tid ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (tid = 0 ; tid < nth ; tid++)
{
// my_flmax starts at 1, not 0, so empty partitions still yield a
// valid (nonzero) flop count for GB_hash_table_size
int64_t my_flmax = 1, istart, iend ;
GB_PARTITION (istart, iend, nk, tid, nth) ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t kk = kfirst + i ;
// flop count for vector kk is the difference of the cumulative sum
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
my_flmax = GB_IMAX (my_flmax, fl) ;
}
Coarse_Work [tid] = my_flmax ;
}
// combine results from each thread (serial reduction over nth partials)
int64_t flmax = 1 ;
for (tid = 0 ; tid < nth ; tid++)
{
flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
}
// check the parallel computation against a serial recomputation
#ifdef GB_DEBUG
int64_t flmax2 = 1 ;
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
flmax2 = GB_IMAX (flmax2, fl) ;
}
ASSERT (flmax == flmax2) ;
#endif
//--------------------------------------------------------------------------
// define the coarse task
//--------------------------------------------------------------------------
SaxpyTasks [taskid].start = kfirst ;
SaxpyTasks [taskid].end = klast ;
SaxpyTasks [taskid].vector = -1 ; // -1 denotes a coarse task
SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ;
SaxpyTasks [taskid].Hi = NULL ; // assigned later
SaxpyTasks [taskid].Hf = NULL ; // assigned later
SaxpyTasks [taskid].Hx = NULL ; // assigned later
SaxpyTasks [taskid].my_cjnz = 0 ; // for fine tasks only
SaxpyTasks [taskid].leader = taskid ; // a coarse task is its own team
SaxpyTasks [taskid].team_size = 1 ;
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------
// Construct a balanced set of coarse and fine tasks for C=A*B (saxpy3).
// Decides whether/how the mask M is applied, selects Hash vs Gustavson,
// and slices the work of B's vectors into tasks. Returns GrB_NO_VALUE if
// M is present but too costly to use, so the caller applies it afterward.
GrB_Info GB_AxB_saxpy3_slice_balanced
(
// inputs
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash
// outputs
GB_saxpy3task_struct **SaxpyTasks_handle,
size_t *SaxpyTasks_size_handle,
bool *apply_mask, // if true, apply M during saxpy3
bool *M_in_place, // if true, use M in-place
int *ntasks, // # of tasks created (coarse and fine)
int *nfine, // # of fine tasks created
int *nthreads, // # of threads to use
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
(*apply_mask) = false ;
(*M_in_place) = false ;
(*ntasks) = 0 ;
(*nfine) = 0 ;
(*nthreads) = 0 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
ASSERT (!GB_ZOMBIES (B)) ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// define result and workspace
//--------------------------------------------------------------------------
GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
size_t SaxpyTasks_size = 0 ;
GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks
GB_WERK_DECLARE (Coarse_Work, int64_t) ; // workspace for flop counts
GB_WERK_DECLARE (Fine_slice, int64_t) ;
GB_WERK_DECLARE (Fine_fl, int64_t) ; // size max(nnz(B(:,j)))
//--------------------------------------------------------------------------
// get A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t avlen = A->vlen ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int8_t *restrict Bb = B->b ;
const int64_t *restrict Bi = B->i ;
const int64_t bvdim = B->vdim ;
const int64_t bnz = GB_nnz_held (B) ;
const int64_t bnvec = B->nvec ;
const int64_t bvlen = B->vlen ;
const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
int64_t cvlen = avlen ;
int64_t cvdim = bvdim ;
//--------------------------------------------------------------------------
// compute flop counts for each vector of B and C
//--------------------------------------------------------------------------
int64_t Mwork = 0 ;
// NOTE: Bflops aliases C->p, so C->p is clobbered by this analysis
int64_t *restrict Bflops = C->p ; // use C->p as workspace for Bflops
GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
Context)) ;
// Bflops is a cumulative sum over bnvec+1 entries; the last is the total
int64_t total_flops = Bflops [bnvec] ;
double axbflops = total_flops - Mwork ;
GBURBLE ("axbwork %g ", axbflops) ;
if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;
//--------------------------------------------------------------------------
// determine if the mask M should be applied, or done later
//--------------------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
(*apply_mask) = false ;
}
else if (GB_IS_BITMAP (M) || GB_as_if_full (M))
{
//----------------------------------------------------------------------
// M is present and full, bitmap, or sparse/hyper with all entries
//----------------------------------------------------------------------
// Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.
(*apply_mask) = true ;
// The work for M has not yet been added to Bflops.
// Each vector M(:,j) has cvlen entries.
Mwork = cvlen * cvdim ;
if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
{
if (axbflops < (double) Mwork * GB_MWORK_BETA)
{
// The mask is too costly to scatter into the Hf workspace.
// Leave it in place and use all-hash tasks.
AxB_method = GxB_AxB_HASH ;
}
else
{
// Scatter M into Hf and use all-Gustavson tasks.
AxB_method = GxB_AxB_GUSTAVSON ;
}
}
if (AxB_method == GxB_AxB_HASH)
{
// Use the hash method for all tasks (except for those tasks which
// require a hash table size >= cvlen; those tasks use Gustavson).
// Do not scatter the mask into the Hf hash workspace. The work
// for the mask is not accounted for in Bflops, so the hash tables
// can be small.
(*M_in_place) = true ;
GBURBLE ("(use mask in-place) ") ;
}
else
{
// Use the Gustavson method for all tasks, and scatter M into the
// fine Gustavson workspace. The work for M is not yet in the
// Bflops cumulative sum. Add it now.
// NOTE(review): missing ';' after this ASSERT — harmless when
// ASSERT expands to nothing, but verify it compiles with GB_DEBUG.
ASSERT (AxB_method == GxB_AxB_GUSTAVSON)
int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
int64_t kk ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (kk = 0 ; kk <= bnvec ; kk++)
{
// adding cvlen*(kk+1) to the cumulative sum adds cvlen flops
// to each individual vector's count
Bflops [kk] += cvlen * (kk+1) ;
}
total_flops = Bflops [bnvec] ;
GBURBLE ("(use mask) ") ;
}
}
else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
{
//----------------------------------------------------------------------
// M is costly to use; apply it after C=A*B
//----------------------------------------------------------------------
// Do not use M during the computation of A*B. Instead, compute C=A*B
// and then apply the mask later. Tell the caller that the mask should
// not be applied, so that it will be applied later in GB_mxm.
(*apply_mask) = false ;
GBURBLE ("(discard mask) ") ;
GB_FREE_ALL ;
return (GrB_NO_VALUE) ;
}
else
{
//----------------------------------------------------------------------
// use M during saxpy3
//----------------------------------------------------------------------
(*apply_mask) = true ;
GBURBLE ("(use mask) ") ;
}
//--------------------------------------------------------------------------
// determine # of threads and # of initial coarse tasks
//--------------------------------------------------------------------------
(*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
int ntasks_initial = ((*nthreads) == 1) ? 1 :
(GB_NTASKS_PER_THREAD * (*nthreads)) ;
//--------------------------------------------------------------------------
// give preference to Gustavson when using few threads
//--------------------------------------------------------------------------
if ((*nthreads) <= 8 &&
(!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
{
// Unless a specific method has been explicitly requested, see if
// Gustavson should be used with a small number of threads.
// Matrix-vector has a maximum intensity of 1, so this heuristic only
// applies to GrB_mxm.
double abnz = GB_nnz (A) + GB_nnz (B) + 1 ;
double workspace = (double) ntasks_initial * (double) cvlen ;
double intensity = total_flops / abnz ;
GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
intensity, workspace / abnz) ;
if (intensity >= 8 && workspace < abnz)
{
// work intensity is large, and Gustavson workspace is modest;
// use Gustavson for all tasks
AxB_method = GxB_AxB_GUSTAVSON ;
GBURBLE (": select Gustvason) ") ;
}
else
{
// use default task creation: mix of Hash and Gustavson
GBURBLE (") ") ;
}
}
//--------------------------------------------------------------------------
// determine target task size
//--------------------------------------------------------------------------
double target_task_size = ((double) total_flops) / ntasks_initial ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
double target_fine_size = target_task_size / GB_FINE_WORK ;
target_fine_size = GB_IMAX (target_fine_size, chunk) ;
//--------------------------------------------------------------------------
// determine # of parallel tasks
//--------------------------------------------------------------------------
int ncoarse = 0 ; // # of coarse tasks
int max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks
// FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// construct initial coarse tasks
//----------------------------------------------------------------------
GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
if (Coarse_initial == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// slice B's vectors so each initial coarse task has equal flop counts
GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;
//----------------------------------------------------------------------
// split the work into coarse and fine tasks
//----------------------------------------------------------------------
// This first pass only COUNTS the tasks (ncoarse, *nfine, max_bjnz);
// the tasks themselves are constructed in the second pass below, which
// must traverse the vectors in exactly the same way.
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
// where j == GBH (Bh, kk)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// vectors kcoarse_start to kk-1 form a single
// coarse task
ncoarse++ ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// vectors kk will be split into multiple fine tasks
max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
int team_size = ceil (jflops / target_fine_size) ;
(*nfine) += team_size ;
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// vectors kcoarse_start to klast-1 form a single
// coarse task
ncoarse++ ;
}
}
else
{
// This coarse task is OK as-is.
ncoarse++ ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
if (bnvec == 1)
{
// If B is a single vector, and is computed by a single thread,
// then a single fine task is used.
(*nfine) = 1 ;
ncoarse = 0 ;
}
else
{
// One thread uses a single coarse task if B is not a vector.
(*nfine) = 0 ;
ncoarse = 1 ;
}
}
(*ntasks) = ncoarse + (*nfine) ;
//--------------------------------------------------------------------------
// allocate the tasks, and workspace to construct fine tasks
//--------------------------------------------------------------------------
SaxpyTasks = GB_MALLOC_WERK ((*ntasks), GB_saxpy3task_struct,
&SaxpyTasks_size) ;
GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
if (max_bjnz > 0)
{
// also allocate workspace to construct fine tasks
GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
// Fine_fl will only fit on the Werk stack if max_bjnz is small,
// but try anyway, in case it fits. It is placed at the top of the
// Werk stack.
GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
}
if (SaxpyTasks == NULL || Coarse_Work == NULL ||
(max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// clear SaxpyTasks (SaxpyTasks_size is the allocated size in bytes)
memset (SaxpyTasks, 0, SaxpyTasks_size) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// create the coarse and fine tasks
//----------------------------------------------------------------------
// Second pass: same traversal as the counting pass above, but now the
// tasks are actually constructed.
int nf = 0 ; // fine tasks have task id 0:nfine-1
int nc = (*nfine) ; // coarse task ids are nfine:ntasks-1
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// kcoarse_start:kk-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, kk-1,
SaxpyTasks, nc++, Bflops, cvlen, chunk,
nthreads_max, Coarse_Work, AxB_method) ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// count the work for each entry B(k,j). Do not
// include the work to scan M(:,j), since that will
// be evenly divided between all tasks in this team.
int64_t pB_start = GBP (Bp, kk, bvlen) ;
int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
int64_t s ;
#pragma omp parallel for num_threads(nth) \
schedule(static)
for (s = 0 ; s < bjnz ; s++)
{
// get B(k,j)
Fine_fl [s] = 1 ;
int64_t pB = pB_start + s ;
if (!GBB (Bb, pB)) continue ;
int64_t k = GBI (Bi, pB, bvlen) ;
// fl = flop count for just A(:,k)*B(k,j)
int64_t pA, pA_end ;
int64_t pleft = 0 ;
GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
anvec-1, k, &pA, &pA_end) ;
int64_t fl = pA_end - pA ;
Fine_fl [s] = fl ;
ASSERT (fl >= 0) ;
}
// cumulative sum of flops to compute A*B(:,j)
GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;
// slice B(:,j) into fine tasks
int team_size = ceil (jflops / target_fine_size) ;
ASSERT (Fine_slice != NULL) ;
GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);
// shared hash table for all fine tasks for A*B(:,j)
int64_t hsize =
GB_hash_table_size (jflops, cvlen, AxB_method) ;
// construct the fine tasks for C(:,j)=A*B(:,j)
int leader = nf ;
for (int fid = 0 ; fid < team_size ; fid++)
{
int64_t pstart = Fine_slice [fid] ;
int64_t pend = Fine_slice [fid+1] ;
int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
SaxpyTasks [nf].start = pB_start + pstart ;
SaxpyTasks [nf].end = pB_start + pend - 1 ;
SaxpyTasks [nf].vector = kk ;
SaxpyTasks [nf].hsize = hsize ;
SaxpyTasks [nf].Hi = NULL ; // assigned later
SaxpyTasks [nf].Hf = NULL ; // assigned later
SaxpyTasks [nf].Hx = NULL ; // assigned later
SaxpyTasks [nf].my_cjnz = 0 ;
SaxpyTasks [nf].leader = leader ;
SaxpyTasks [nf].team_size = team_size ;
nf++ ;
}
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// kcoarse_start:klast-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
else
{
// This coarse task is OK as-is.
GB_create_coarse_task (kfirst, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
// create a single coarse task: hash or Gustavson
GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1,
Coarse_Work, AxB_method) ;
if (bnvec == 1)
{
// convert the single coarse task into a single fine task
SaxpyTasks [0].start = 0 ; // first entry in B(:,0)
SaxpyTasks [0].end = bnz - 1 ; // last entry in B(:,0)
SaxpyTasks [0].vector = 0 ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*SaxpyTasks_handle) = SaxpyTasks ;
(*SaxpyTasks_size_handle) = SaxpyTasks_size ;
return (GrB_SUCCESS) ;
}
|
parallel_if0.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
int main()
{
  // OMPT test: two nested `parallel if(0)` regions (each serialized, team
  // size 1) with an explicit task inside the inner one. The `// CHECK:`
  // lines below are the FileCheck expectations and must not be edited:
  // they verify parallel begin/end, implicit/explicit task events, and the
  // task frame exit/reenter pointers at each nesting level.
  // print_frame(0);
  #pragma omp parallel if(0)
  {
    // print_frame(1);
    print_ids(0);
    print_ids(1);
    // print_frame(0);
    #pragma omp parallel if(0)
    {
      // print_frame(1);
      print_ids(0);
      print_ids(1);
      print_ids(2);
      // print_frame(0);
      #pragma omp task
      {
        // print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
        print_ids(3);
      }
    }
    print_fuzzy_address(1);
  }
  print_fuzzy_address(2);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[NESTED_IMPLICIT_TASK_ID]], second_task_id=[[EXPLICIT_TASK_ID]], prior_task_status=ompt_task_switch=7
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[EXPLICIT_TASK_ID]], second_task_id=[[NESTED_IMPLICIT_TASK_ID]], prior_task_status=ompt_task_complete=1
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[EXPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  return 0;
}
|
draw.c | #include "blocko.h"
// Query the currently bound framebuffer's completeness.
// Returns 0 when the framebuffer is complete; otherwise prints a
// diagnostic with the raw status code to stdout and returns 1.
int is_framebuffer_incomplete()
{
    int status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status == GL_FRAMEBUFFER_COMPLETE)
        return 0;
    const char *why;
    if (status == GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT)
        why = "incomplete attachment";
    else if (status == GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT)
        why = "missing attachment";
    else if (status == GL_FRAMEBUFFER_UNSUPPORTED)
        why = "unsupported";
    else
        why = "(unknown)";
    printf("framebuffer status: %d %s\n", status, why);
    return 1;
}
// qsort comparator for struct qitem: orders by descending y
// (the item with the larger y sorts first).
int sorter(const void * _a, const void * _b)
{
    const struct qitem *a = _a;
    const struct qitem *b = _b;
    // yields 1 when a->y < b->y, -1 when a->y > b->y, 0 when equal
    return (a->y < b->y) - (a->y > b->y);
}
// Conservative frustum test for one chunk's bounding box.
// Transforms the box's 8 corners by the clip-space matrix and counts, per
// clip plane, how many corners lie outside it. The chunk is culled (return
// 0) only if all 8 corners are outside the SAME plane; otherwise it may be
// visible and 1 is returned.
int chunk_in_frustum(float *matrix, int chunk_x, int chunk_z)
{
    // outside[p] = # of corners beyond plane p:
    // 0:left 1:right 2:bottom 3:top 4:near 5:far 6:(w < 0)
    int outside[7] = {0};
    for (int x = 0; x <= 1; x++) for (int z = 0; z <= 1; z++) for (int y = 0; y <= 1; y++)
    {
        float v[4];
        mat4_f3_multiply(v, matrix,
                chunk_x*BS*CHUNKW + x*BS*CHUNKW,
                0 + y*BS*TILESH, // TODO: use highest gndheight?
                chunk_z*BS*CHUNKD + z*BS*CHUNKD);
        if (v[0] < -v[3]) outside[0]++;
        if (v[0] >  v[3]) outside[1]++;
        if (v[1] < -v[3]) outside[2]++;
        if (v[1] >  v[3]) outside[3]++;
        if (v[2] < -v[3]) outside[4]++;
        if (v[2] >  v[3]) outside[5]++;
        if (v[3] < 0.f)   outside[6]++;
    }
    for (int p = 0; p < 7; p++)
    {
        if (outside[p] == 8)
            return 0;   // all corners rejected by one plane: not visible
    }
    return 1;
}
// prevent shaking shadows by quantizing sun or moon pitch
float quantize(float p)
{
float quantizer;
float qbracket = sinf(p);
if (qbracket > 0.8f) quantizer = 0.001f;
else if (qbracket > 0.6f) quantizer = 0.0005f;
else if (qbracket > 0.4f) quantizer = 0.00025f;
else if (qbracket > 0.2f) quantizer = 0.000125f;
else quantizer = 0.0000625f;
return roundf(p / quantizer) * quantizer;
}
//draw everything in the game on the screen
void draw_stuff()
{
float identityM[] = {
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
};
float shadow_space[16];
glDisable(GL_MULTISAMPLE);
float modelM[16];
memcpy(modelM, identityM, sizeof identityM);
// make shadow map
if (shadow_mapping)
{
glBindFramebuffer(GL_FRAMEBUFFER, shadow_fbo);
if (is_framebuffer_incomplete()) goto fb_is_bad;
glViewport(0, 0, SHADOW_SZ, SHADOW_SZ);
glClear(GL_DEPTH_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(4.f, 4.f);
//render shadows here
glUseProgram(shadow_prog_id);
// view matrix
float viewM[16];
float f[3];
float moon_pitch = sun_pitch + PI;
if (moon_pitch < 0) moon_pitch += TAU;
float quantized_sun_pitch = quantize(sun_pitch);
float quantized_moon_pitch = quantize(moon_pitch);
float yaw = 3.1415926535 * -0.5f;
float dist2sun = (TILESW / 4) * BS;
sun_pos.x = roundf(camplayer.pos.x / BS) * BS + dist2sun * sinf(-yaw) * cosf(quantized_sun_pitch);
sun_pos.y = 100 * BS - dist2sun * sinf(quantized_sun_pitch);
sun_pos.z = roundf(camplayer.pos.z / BS) * BS + dist2sun * cosf(-yaw) * cosf(quantized_sun_pitch);
moon_pos.x = roundf(camplayer.pos.x / BS) * BS + dist2sun * sinf(-yaw) * cosf(quantized_moon_pitch);
moon_pos.y = 100 * BS - dist2sun * sinf(quantized_moon_pitch);
moon_pos.z = roundf(camplayer.pos.z / BS) * BS + dist2sun * cosf(-yaw) * cosf(quantized_moon_pitch);
if (sun_pitch < PI)
{
lookit(viewM, f, sun_pos.x, sun_pos.y, sun_pos.z, quantized_sun_pitch, yaw);
translate(viewM, -sun_pos.x, -sun_pos.y, -sun_pos.z);
}
else
{
lookit(viewM, f, moon_pos.x, moon_pos.y, moon_pos.z, quantized_moon_pitch, yaw);
translate(viewM, -moon_pos.x, -moon_pos.y, -moon_pos.z);
}
// proj matrix
float snear = 10.f; // TODO find closest possible block
float sfar = dist2sun + 9000.f;
float x = 1.f / (6000 / 2.f);
float y = -1.f / (6000 / 2.f);
float z = -1.f / ((sfar - snear) / 2.f);
float tz = -(sfar + snear) / (sfar - snear);
float orthoM[] = {
x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, tz, 1,
};
float shadow_pvM[16];
if (!lock_culling)
mat4_multiply(shadow_pvM, orthoM, viewM);
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "proj"), 1, GL_FALSE, orthoM);
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "view"), 1, GL_FALSE, viewM);
glUniform1i(glGetUniformLocation(shadow_prog_id, "tarray"), 0);
glUniform1f(glGetUniformLocation(shadow_prog_id, "BS"), BS);
float biasM[] = {
0.5, 0, 0, 1,
0, 0.5, 0, 1,
0, 0, 0.5, 1,
0.5, 0.5, 0.5, 1,
};
float tmpM[16];
mat4_multiply(tmpM, orthoM, viewM);
mat4_multiply(shadow_space, biasM, tmpM);
for (int i = 0; i < VAOW; i++) for (int j = 0; j < VAOD; j++)
{
if (!VBOLEN_(i, j)) continue;
if (!frustum_culling || chunk_in_frustum(shadow_pvM, i, j))
{
glBindVertexArray(VAO_(i, j));
modelM[12] = i * BS * CHUNKW;
modelM[14] = j * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "model"), 1, GL_FALSE, modelM);
glDrawArrays(GL_POINTS, 0, VBOLEN_(i, j));
shadow_polys += VBOLEN_(i, j);
}
}
fb_is_bad:
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDisable(GL_POLYGON_OFFSET_FILL);
}
float night_amt;
if (sun_pitch < PI) // in the day, linearly change the sky color
{
night_amt = fmodf(sun_pitch + 3*PI2, TAU) / TAU;
if (night_amt > 0.5f) night_amt = 1.f - night_amt;
night_amt *= 2.f;
}
else // at night change via cubic-sine so that it's mostly dark all night
{
night_amt = 1.f + sinf(sun_pitch); // 0 to 1
night_amt *= night_amt * night_amt; // 0 to 1
night_amt *= -0.5f; //-.5 to 0
night_amt += 1.f; // 1 to .5
}
if (night_amt > 0.5f)
{
fog_r = lerp(2.f*(night_amt - 0.5f), FOG_DUSK_R, FOG_NIGHT_R);
fog_g = lerp(2.f*(night_amt - 0.5f), FOG_DUSK_G, FOG_NIGHT_G);
fog_b = lerp(2.f*(night_amt - 0.5f), FOG_DUSK_B, FOG_NIGHT_B);
}
else
{
fog_r = lerp(2.f*night_amt, FOG_DAY_R, FOG_DUSK_R);
fog_g = lerp(2.f*night_amt, FOG_DAY_G, FOG_DUSK_G);
fog_b = lerp(2.f*night_amt, FOG_DAY_B, FOG_DUSK_B);
}
glViewport(0, 0, screenw, screenh);
glClearColor(fog_r, fog_g, fog_b, 1.f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (antialiasing)
glEnable(GL_MULTISAMPLE);
// compute proj matrix
float near = 8.f;
float far = 99999.f;
float frustw = 4.5f * zoom_amt * screenw / screenh;
float frusth = 4.5f * zoom_amt;
float projM[] = {
near/frustw, 0, 0, 0,
0, near/frusth, 0, 0,
0, 0, -(far + near) / (far - near), -1,
0, 0, -(2.f * far * near) / (far - near), 0
};
// compute view matrix
float eye0 = lerped_pos.x + PLYR_W / 2;
float eye1 = lerped_pos.y + EYEDOWN * (camplayer.sneaking ? 2 : 1);
float eye2 = lerped_pos.z + PLYR_W / 2;
float f[3];
float viewM[16];
lookit(viewM, f, eye0, eye1, eye2, camplayer.pitch, camplayer.yaw);
sun_draw(projM, viewM, sun_pitch, shadow_tex_id);
// find where we are pointing at
rayshot(eye0, eye1, eye2, f[0], f[1], f[2]);
// translate by hand
float translated_viewM[16];
memcpy(translated_viewM, viewM, sizeof viewM);
translate(translated_viewM, -eye0, -eye1, -eye2);
static float pvM[16];
if (!lock_culling)
mat4_multiply(pvM, projM, translated_viewM);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glUseProgram(prog_id);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, material_tex_id);
glUniform1i(glGetUniformLocation(prog_id, "tarray"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, shadow_tex_id);
glUniform1i(glGetUniformLocation(prog_id, "shadow_map"), 1);
glUniform1i(glGetUniformLocation(prog_id, "shadow_mapping"), shadow_mapping);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "proj"), 1, GL_FALSE, projM);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "view"), 1, GL_FALSE, translated_viewM);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "shadow_space"), 1, GL_FALSE, shadow_space);
glUniform1f(glGetUniformLocation(prog_id, "BS"), BS);
if (sun_pitch < PI)
glUniform3f(glGetUniformLocation(prog_id, "light_pos"), sun_pos.x, sun_pos.y, sun_pos.z);
else
glUniform3f(glGetUniformLocation(prog_id, "light_pos"), moon_pos.x, moon_pos.y, moon_pos.z);
glUniform3f(glGetUniformLocation(prog_id, "view_pos"), eye0, eye1, eye2);
{
float m = ICLAMP(night_amt * 2.f, 0.f, 1.f);
glUniform1f(glGetUniformLocation(prog_id, "sharpness"), m*m*m*(m*(m*6.f-15.f)+10.f));
float r = lerp(night_amt, DAY_R, NIGHT_R);
float g = lerp(night_amt, DAY_G, NIGHT_G);
float b = lerp(night_amt, DAY_B, NIGHT_B);
glUniform3f(glGetUniformLocation(prog_id, "day_color"), r, g, b);
glUniform3f(glGetUniformLocation(prog_id, "glo_color"), 0.92f, 0.83f, 0.69f);
glUniform3f(glGetUniformLocation(prog_id, "fog_color"), fog_r, fog_g, fog_b);
}
// determine which chunks to send to gl
TIMER(rings)
int x0 = (eye0 - BS * CHUNKW2) / (BS * CHUNKW);
int z0 = (eye2 - BS * CHUNKW2) / (BS * CHUNKD);
CLAMP(x0, 0, VAOW - 2);
CLAMP(z0, 0, VAOD - 2);
int x1 = x0 + 1;
int z1 = z0 + 1;
int x0d = ((x0 * BS * CHUNKW + BS * CHUNKW2) - eye0);
int x1d = ((x1 * BS * CHUNKW + BS * CHUNKW2) - eye0);
int z0d = ((z0 * BS * CHUNKD + BS * CHUNKD2) - eye2);
int z1d = ((z1 * BS * CHUNKD + BS * CHUNKD2) - eye2);
// initialize with ring0 chunks
struct qitem fresh[VAOW*VAOD] = { // chunkx, distance sq, chunkz
{x0, (x0d * x0d + z0d * z0d), z0},
{x0, (x0d * x0d + z1d * z1d), z1},
{x1, (x1d * x1d + z0d * z0d), z0},
{x1, (x1d * x1d + z1d * z1d), z1}
};
size_t fresh_len = 4;
qsort(fresh, fresh_len, sizeof(struct qitem), sorter);
#pragma omp critical
{
memcpy(fresh + fresh_len,
(struct qitem *)just_generated,
just_gen_len * sizeof *just_generated);
fresh_len += just_gen_len;
just_gen_len = 0;
}
// position within each ring that we're at this frame
static struct qitem ringpos[VAOW + VAOD] = {0};
for (int r = 1; r < VAOW + VAOD; r++)
{
// expand ring in all directions
x0--; x1++; z0--; z1++;
// freshen farther rings less and less often
if (r >= 3 && r <= 6 && frame % 2 != r % 2) continue;
if (r >= 7 && r <= 14 && frame % 4 != r % 4) continue;
if (r >= 15 && r <= 30 && frame % 8 != r % 8) continue;
if (r >= 31 && frame % 16 != r % 16) continue;
int *x = &ringpos[r].x;
int *z = &ringpos[r].z;
// move to next chunk, maybe on ring
--(*x);
// wrap around the ring
int x_too_low = (*x < x0);
if (x_too_low) { *x = x1; --(*z); }
// reset if out of the ring
int z_too_low = (*z < z0);
if (z_too_low) { *x = x1; *z = z1; }
// get out of the middle
int is_on_ring = (*z == z0 || *z == z1 || *x == x1);
if (!is_on_ring) { *x = x0; }
// render if in bounds
if (*x >= 0 && *x < VAOW && *z >= 0 && *z < VAOD)
{
fresh[fresh_len].x = *x;
fresh[fresh_len].z = *z;
fresh_len++;
}
}
// render non-fresh chunks
TIMER(drawstale)
struct qitem stale[VAOW * VAOD] = {0}; // chunkx, distance sq, chunkz
size_t stale_len = 0;
for (int i = 0; i < VAOW; i++) for (int j = 0; j < VAOD; j++)
{
// skip chunks we will draw fresh this frame
size_t limit = show_fresh_updates ? fresh_len : 4;
for (size_t k = 0; k < limit; k++)
if (fresh[k].x == i && fresh[k].z == j)
goto skip;
stale[stale_len].x = i;
stale[stale_len].z = j;
int xd = ((i * BS * CHUNKW + BS * CHUNKW2) - eye0);
int zd = ((j * BS * CHUNKD + BS * CHUNKD2) - eye2);
stale[stale_len].y = (xd * xd + zd * zd);
// only queue chunks we could see
if (chunk_in_frustum(pvM, i, j))
stale_len++;
skip: ;
}
qsort(stale, stale_len, sizeof *stale, sorter);
for (size_t my = 0; my < stale_len; my++)
{
int myx = stale[my].x;
int myz = stale[my].z;
modelM[12] = myx * BS * CHUNKW;
modelM[14] = myz * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(prog_id, "model"), 1, GL_FALSE, modelM);
glBindVertexArray(VAO_(myx, myz));
glDrawArrays(GL_POINTS, 0, VBOLEN_(myx, myz));
polys += VBOLEN_(myx, myz);
}
// package, ship and render fresh chunks (while the stales are rendering!)
TIMER(buildvbo);
for (size_t my = 0; my < fresh_len; my++)
{
int myx = fresh[my].x;
int myz = fresh[my].z;
int xlo = myx * CHUNKW;
int xhi = xlo + CHUNKW;
int zlo = myz * CHUNKD;
int zhi = zlo + CHUNKD;
int ungenerated = false;
#pragma omp critical
if (!AGEN_(myx, myz))
{
ungenerated = true;
}
if (ungenerated)
continue; // don't bother with ungenerated chunks
glBindVertexArray(VAO_(myx, myz));
glBindBuffer(GL_ARRAY_BUFFER, VBO_(myx, myz));
v = vbuf; // reset vertex buffer pointer
w = wbuf; // same for water buffer
TIMER(buildvbo);
for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH; y++) for (int x = xlo; x < xhi; x++)
{
if (v >= v_limit) break; // out of vertex space, shouldnt reasonably happen
if (w >= w_limit) w -= 10; // just overwrite water if we run out of space
if (T_(x, y, z) == OPEN && (!show_light_values || !in_test_area(x, y, z)))
continue;
//lighting
float usw = CORN_(x , y , z );
float use = CORN_(x+1, y , z );
float unw = CORN_(x , y , z+1);
float une = CORN_(x+1, y , z+1);
float dsw = CORN_(x , y+1, z );
float dse = CORN_(x+1, y+1, z );
float dnw = CORN_(x , y+1, z+1);
float dne = CORN_(x+1, y+1, z+1);
float USW = KORN_(x , y , z );
float USE = KORN_(x+1, y , z );
float UNW = KORN_(x , y , z+1);
float UNE = KORN_(x+1, y , z+1);
float DSW = KORN_(x , y+1, z );
float DSE = KORN_(x+1, y+1, z );
float DNW = KORN_(x , y+1, z+1);
float DNE = KORN_(x+1, y+1, z+1);
int t = T_(x, y, z);
int m = x & (CHUNKW-1);
int n = z & (CHUNKD-1);
if (t == GRAS)
{
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ 0, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ 1, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ 1, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ 1, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ 1, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ 2, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == DIRT || t == GRG1 || t == GRG2)
{
int u = (t == DIRT) ? 2 :
(t == GRG1) ? 3 : 4;
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ u, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ 2, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ 2, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ 2, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ 2, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ 2, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == STON || t == SAND || t == ORE || t == OREH || t == HARD || t == WOOD || t == GRAN ||
t == RLEF || t == YLEF)
{
int f = (t == STON) ? 5 :
(t == SAND) ? 6 :
(t == ORE ) ? 11 :
(t == OREH) ? 12 :
(t == HARD) ? 13 :
(t == WOOD) ? 14 :
(t == GRAN) ? 15 :
(t == RLEF) ? 16 :
(t == YLEF) ? 17 :
0 ;
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ f, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ f, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ f, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ f, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ f, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ f, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == WATR)
{
if (y == 0 || T_(x , y-1, z ) == OPEN)
{
int f = 7 + (pframe / 10 + (x ^ z)) % 4;
*w++ = (struct vbufv){ f, UP, m, y+0.06f, n, usw, use, unw, une, USW, USE, UNW, UNE, 0.5f };
*w++ = (struct vbufv){ f, DOWN, m, y-0.94f, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 0.5f };
}
}
else if (t == LITE)
{
*w++ = (struct vbufv){ 18, SOUTH, m , y, n+0.5f, use, usw, dse, dsw, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, NORTH, m , y, n-0.5f, unw, une, dnw, dne, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, WEST, m+0.5f, y, n , usw, unw, dsw, dnw, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, EAST, m-0.5f, y, n , une, use, dne, dse, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
}
if (show_light_values && in_test_area(x, y, z))
{
int f = GLO_(x, y, z) + PNG0;
int ty = y;
float lit = 1.f;
if (IS_OPAQUE(x, y, z))
{
ty = y - 1;
lit = 0.1f;
}
*w++ = (struct vbufv){ f, UP, m, ty+0.9f, n, lit, lit, lit, lit, lit, lit, lit, lit, 1.f };
*w++ = (struct vbufv){ f, DOWN, m, ty-0.1f, n, lit, lit, lit, lit, lit, lit, lit, lit, 1.f };
}
}
if (w - wbuf < v_limit - v) // room for water in vertex buffer?
{
memcpy(v, wbuf, (w - wbuf) * sizeof *wbuf);
v += w - wbuf;
}
VBOLEN_(myx, myz) = v - vbuf;
polys += VBOLEN_(myx, myz);
TIMER(glBufferData)
glBufferData(GL_ARRAY_BUFFER, VBOLEN_(myx, myz) * sizeof *vbuf, vbuf, GL_STATIC_DRAW);
if (my < 4) // draw the newly buffered verts
{
TIMER(glDrawArrays)
modelM[12] = myx * BS * CHUNKW;
modelM[13] = 0.f;
modelM[14] = myz * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(prog_id, "model"), 1, GL_FALSE, modelM);
glDrawArrays(GL_POINTS, 0, VBOLEN_(myx, myz));
}
}
debrief();
TIMER(swapwindow);
SDL_GL_SwapWindow(win);
TIMER();
}
#ifdef _OPENMP
#include <omp.h>
#endif
#include <mpi.h>
#include <cover_functions.h>
/* Number of branches handed to each OpenMP thread per dispatch. */
#define FACTOR 2
/* MPI world size, this process's rank, and its local OpenMP thread count. */
unsigned int nb_MPI_proc, my_MPI_rank, nb_OMP_thread;
/* Per-rank thread counts (allocated on the master only) and per-rank
 * starting branch offsets (one slot on slaves, nb_MPI_proc on the master). */
unsigned int * thread_per_proc, * dec_th_per_proc;
/*
 * Slave-side counterpart of send_pb_data(): receive the exact-cover
 * instance broadcast by the master (rank 0) and build one backtracking
 * context per local OpenMP thread.
 *
 * instance     out: freshly allocated instance filled from the broadcasts
 * ctxs         out: array of nb_OMP_thread per-thread contexts
 * common_item  out: item chosen by the master for the top-level split
 *
 * The MPI collectives below must mirror send_pb_data() call-for-call.
 */
void receive_pb_data(struct instance_t ** instance, struct context_t *** ctxs, int * common_item)
{
    *instance = (struct instance_t *) malloc(sizeof(struct instance_t));
    *ctxs = (struct context_t **) malloc(nb_OMP_thread * sizeof(struct context_t *));
    /* Single slot: a slave only needs its own starting branch offset. */
    dec_th_per_proc = (unsigned int *) malloc(sizeof(unsigned int));
    /* Report our thread count; the gather destination is used on rank 0 only. */
    MPI_Gather(&nb_OMP_thread, 1, MPI_UNSIGNED, NULL, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
    /* Receive the index of the first branch this rank should explore. */
    MPI_Scatter(NULL, 1, MPI_UNSIGNED, dec_th_per_proc, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
    int buffer[4];
    MPI_Bcast(buffer, 4, MPI_INT, 0, MPI_COMM_WORLD);
    (*instance)->n_items = buffer[0];
    (*instance)->n_primary = buffer[1];
    (*instance)->n_options = buffer[2];
    *common_item = buffer[3];
    (*instance)->item_name = NULL; /* item names are not shipped over MPI */
    (*instance)->options = (int *) malloc((*instance)->n_options * (*instance)->n_items * sizeof(int));
    MPI_Bcast((*instance)->options, (*instance)->n_options * (*instance)->n_items, MPI_INT, 0, MPI_COMM_WORLD);
    (*instance)->ptr = (int *) malloc(((*instance)->n_options + 1) * sizeof(int));
    MPI_Bcast((*instance)->ptr, (*instance)->n_options + 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("On process %u : received %d items & %d options.\n", my_MPI_rank, (*instance)->n_items, (*instance)->n_options);
    /* Each thread builds its own context in parallel. */
    #pragma omp parallel for schedule(static,1)
    for (unsigned int i = 0; i < nb_OMP_thread; ++i)
    {
        (*ctxs)[i] = backtracking_setup(*instance);
        (*ctxs)[i]->nodes = 1;
    }
    printf("On process %u : %u contexts created.\n", my_MPI_rank, nb_OMP_thread);
}
void send_pb_data(struct instance_t ** instance, struct context_t ** ctx, int * common_item)
{
*instance = load_matrix(in_filename);
*ctx = backtracking_setup(*instance);
*common_item = choose_next_item(*ctx);
printf("From Master : got %d items, %d primary & %d options to broadcast.\n",
(*instance)->n_items, (*instance)->n_primary, (*instance)->n_options);
thread_per_proc = (unsigned int *) malloc(nb_MPI_proc * sizeof(unsigned int));
MPI_Gather(&nb_OMP_thread, 1, MPI_UNSIGNED, thread_per_proc, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
dec_th_per_proc = (unsigned int *) malloc(nb_MPI_proc * sizeof(unsigned int));
dec_th_per_proc[0] = 0U; thread_per_proc[0] = 0U;
for (unsigned int i = 0; i < nb_MPI_proc; ++i)
dec_th_per_proc[(i + 1) % nb_MPI_proc] = dec_th_per_proc[i] + thread_per_proc[i] * FACTOR;
MPI_Scatter(dec_th_per_proc, 1, MPI_UNSIGNED, dec_th_per_proc, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
int buffer[4] = {(*instance)->n_items, (*instance)->n_primary, (*instance)->n_options, *common_item};
MPI_Bcast(buffer, 4, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast((*instance)->options, (*instance)->n_options * (*instance)->n_items, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast((*instance)->ptr, (*instance)->n_options + 1, MPI_INT, 0, MPI_COMM_WORLD);
}
/*
 * Solve FACTOR branches per local OpenMP thread, starting at branch
 * buffer[0] of chosen_item, and report deltas back through buffer.
 *
 * buffer[0] in : index of the first branch to explore
 * buffer[1] in : global node count so far  / out: nodes added here
 * buffer[2] in : global solution count so far / out: solutions found here
 */
void solve_OMP(const struct instance_t * instance, struct context_t ** ctxs, int chosen_item, long long int * buffer)
{
    struct sparse_array_t * active_options = ctxs[0]->active_options[chosen_item];
    const long long int nb_nodes = buffer[1] + 1;
    const long long int nb_sol = buffer[2];
    /* Seed every per-thread context with the current global counters so
     * the per-thread deltas can be recovered afterwards. */
    #pragma omp parallel for schedule(static,1)
    for (unsigned int i = 0; i < nb_OMP_thread; ++i)
    {
        ctxs[i]->nodes = nb_nodes;
        ctxs[i]->solutions = nb_sol;
    }
    /* One-past-the-last branch of this batch, clipped to the option count. */
    unsigned int min_bound = buffer[0] + nb_OMP_thread * FACTOR < active_options->len ?
        (unsigned int) buffer[0] + nb_OMP_thread * FACTOR : (unsigned int) active_options->len;
    #pragma omp parallel for schedule(dynamic)
    for (unsigned int i = (unsigned int) buffer[0]; i < min_bound; ++i)
    {
        unsigned short int my_thread = omp_get_thread_num();
        int option = active_options->p[i];
        ctxs[my_thread]->child_num[0] = i;
        choose_option(instance, ctxs[my_thread], option, chosen_item);
        solve(instance, ctxs[my_thread]);
        /* NOTE: exit() inside a parallel region terminates the whole
         * process; this is the intended hard stop at max_solutions. */
        if (ctxs[my_thread]->solutions >= max_solutions)
            exit(EXIT_SUCCESS);
        unchoose_option(instance, ctxs[my_thread], option, chosen_item);
    }
    /* BUGFIX: an OpenMP reduction list item may not be an array element,
     * so reduction(+:buffer[1]) does not compile; reduce into local
     * scalars and store the deltas in the communication buffer after. */
    long long int delta_nodes = 0LL, delta_sol = 0LL;
    #pragma omp parallel for schedule(static,1) reduction(+:delta_nodes,delta_sol)
    for (unsigned int i = 0; i < nb_OMP_thread; ++i)
    {
        delta_nodes += ctxs[i]->nodes - nb_nodes;
        delta_sol += ctxs[i]->solutions - nb_sol;
    }
    buffer[1] = delta_nodes;
    buffer[2] = delta_sol;
}
/*
 * Master event loop (rank 0): hand out batches of branches of chosen_item
 * to slave ranks and accumulate their node/solution counts into ctx.
 *
 * Protocol (com_buffer = {first branch, node count, solution count}):
 *  - tag 2 from a slave: results of its last batch; reply with tag 1
 *    (next batch) while branches and the solution budget remain,
 *    otherwise tag 99 (stop);
 *  - any other tag: report it and resend the current totals with tag 2.
 */
void solve_OMPI_master(struct context_t * ctx, int chosen_item, bool debug)
{
    MPI_Status status;
    bool end = false;
    /* dec_th_per_proc[0] holds the total number of branches already
     * dispatched by send_pb_data(), i.e. the next index to hand out. */
    unsigned int i = dec_th_per_proc[0];
    long long int com_buffer[3] = {0LL, 0LL, 0LL}; // node_id / nb_nodes / nb_sol
    long long int nb_nodes = 0LL, nb_sol = 0LL;
    while (!end) {
        MPI_Recv(com_buffer, 3, MPI_LONG_LONG_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        switch (status.MPI_TAG)
        {
            case 2 :
                if (debug)
                    printf("Received branches %lld:%lld with %lld new solutions from process %d.\n", com_buffer[0],
                           com_buffer[0] + thread_per_proc[status.MPI_SOURCE] * FACTOR - 1, com_buffer[2], status.MPI_SOURCE);
                nb_nodes += com_buffer[1];
                nb_sol += com_buffer[2];
                com_buffer[0] = (long long int) i;
                com_buffer[1] = nb_nodes;
                com_buffer[2] = nb_sol;
                /* Each rank consumes FACTOR branches per local thread. */
                i += thread_per_proc[status.MPI_SOURCE] * FACTOR;
                if (nb_sol < max_solutions && i < (unsigned int) ctx->active_options[chosen_item]->len)
                    MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, status.MPI_SOURCE, 1, MPI_COMM_WORLD);
                else {
                    end = true;
                    MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, status.MPI_SOURCE, 99, MPI_COMM_WORLD);
                }
                break;
            default:
                com_buffer[1] = nb_nodes;
                com_buffer[2] = nb_sol;
                printf("Received unknown type communication. Resetting process %d.\n", status.MPI_SOURCE);
                MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, status.MPI_SOURCE, 2, MPI_COMM_WORLD);
                break;
        }
    }
    /* One slave was already stopped inside the loop above, so drain the
     * remaining nb_MPI_proc - 2 slaves and stop each of them in turn. */
    for (i = 1; i < nb_MPI_proc - 1; ++i) {
        MPI_Recv(com_buffer, 3, MPI_LONG_LONG_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        if (debug)
            printf("Received branches %lld:%lld with %lld new solutions from process %d.\n", com_buffer[0],
                   com_buffer[0] + thread_per_proc[status.MPI_SOURCE] * FACTOR - 1, com_buffer[2], status.MPI_SOURCE);
        nb_nodes += com_buffer[1];
        nb_sol += com_buffer[2];
        MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, status.MPI_SOURCE, 99, MPI_COMM_WORLD);
    }
    ctx->solutions = nb_sol;
    ctx->nodes = nb_nodes;
}
/*
 * Slave event loop: cover the chosen item, solve the batch pre-assigned
 * by send_pb_data() (starting branch in *dec_th_per_proc), then keep
 * requesting batches from the master (tag 1 = new batch, tag 99 = stop)
 * until told to stop, and finally uncover to backtrack.
 */
void solve_OMPI_slave(const struct instance_t * instance, struct context_t ** ctxs, int chosen_item, bool debug)
{
    MPI_Status status;
    bool end = false;
    unsigned int i = 0;
    long long int com_buffer[3] = { (long long int) *dec_th_per_proc, 0LL, 0LL}; // node_id / nb_nodes / nb_sol
    struct sparse_array_t * active_options = ctxs[0]->active_options[chosen_item];
    if (sparse_array_empty(active_options))
        return; /* failure: chosen_item cannot be covered */
    #pragma omp parallel for schedule(static, 1)
    for (i = 0; i < nb_OMP_thread; ++i)
    {
        cover(instance, ctxs[i], chosen_item);
        ctxs[i]->num_children[0] = active_options->len;
    }
    /* Solve the initial batch, then report it to the master (tag 2). */
    solve_OMP(instance, ctxs, chosen_item, com_buffer);
    MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, 0, 2, MPI_COMM_WORLD);
    while (!end) {
        MPI_Recv(com_buffer, 3, MPI_LONG_LONG_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        switch (status.MPI_TAG)
        {
            case 1 :
                if (debug)
                    /* BUGFIX: com_buffer[0] is long long int, so the matching
                     * conversion specifier is %lld, not %llu. */
                    printf("Node %u received branch %lld to solve.\n", my_MPI_rank, com_buffer[0]);
                solve_OMP(instance, ctxs, chosen_item, com_buffer);
                MPI_Send(com_buffer, 3, MPI_LONG_LONG_INT, 0, 2, MPI_COMM_WORLD);
                break;
            case 99 :
                if (debug)
                    printf("Stop received on node %u.\n", my_MPI_rank);
                end = true;
                break;
        }
    }
    #pragma omp parallel for schedule(static,1)
    for (i = 0; i < nb_OMP_thread; ++i)
        uncover(instance, ctxs[i], chosen_item); /* backtrack */
}
/*
 * Entry point: rank 0 acts as the dispatcher (send_pb_data +
 * solve_OMPI_master); every other rank is an OpenMP-parallel solver
 * (receive_pb_data + solve_OMPI_slave).
 */
int main(int argc, char **argv)
{
    option_setup(argc, argv);
    MPI_Init(&argc, &argv);
    /* NOTE(review): nb_MPI_proc / my_MPI_rank are unsigned but MPI writes
     * through int* here -- works on common ABIs, but relies on the cast;
     * consider plain int locals to be strictly conforming. */
    MPI_Comm_size(MPI_COMM_WORLD, (int *) &nb_MPI_proc);
    MPI_Comm_rank(MPI_COMM_WORLD, (int *) &my_MPI_rank);
    nb_OMP_thread = (unsigned int) omp_get_max_threads();
    printf("Hello from process %u! I have %u threads available.\n", my_MPI_rank, nb_OMP_thread);
    struct instance_t * instance = NULL;
    int common_item;
    if (my_MPI_rank) { /* slave ranks */
        struct context_t ** ctxs = NULL;
        receive_pb_data(&instance, &ctxs, &common_item);
        MPI_Barrier(MPI_COMM_WORLD);
        start = wtime();
        solve_OMPI_slave(instance, ctxs, common_item, false);
        MPI_Barrier(MPI_COMM_WORLD);
    } else { /* master rank */
        struct context_t * ctx = NULL;
        send_pb_data(&instance, &ctx, &common_item);
        MPI_Barrier(MPI_COMM_WORLD);
        start = wtime();
        solve_OMPI_master(ctx, common_item, false);
        MPI_Barrier(MPI_COMM_WORLD);
        printf("FINI. Trouvé %lld solutions en %.2fs, %lld noeud parcouru\n", ctx->solutions, wtime() - start, ctx->nodes);
    }
    MPI_Finalize();
    exit(EXIT_SUCCESS);
}
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*/
/* \file
* toolbox Linalg
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File linalg.h
* \brief Contains Matrix, Vector classes */
#ifndef LINALG_H
#define LINALG_H
#include "misc.h"
#include "cblas_alt_template.h"
#include <fstream>
#ifdef WINDOWS
#include <string>
#else
#include <cstring>
#endif
#include "utils.h"
#undef max
#undef min
/// Dense Matrix class
template<typename T> class Matrix;
/// Sparse Matrix class
template<typename T, typename I = INTM> class SpMatrix;
/// Dense Vector class
template<typename T> class Vector;
/// Sparse Vector class
template<typename T, typename I = INTM> class SpVector;
/// Numerical zero test: true when |lambda| falls below 1e-99.
template <typename T>
static inline bool isZero(const T lambda) {
   const double magnitude = static_cast<double>(abs<T>(lambda));
   return magnitude < 1e-99;
}
/// Numerical equality test: true when |lambda1 - lambda2| < 1e-99.
template <typename T>
static inline bool isEqual(const T lambda1, const T lambda2) {
   const double difference = static_cast<double>(abs<T>(lambda1-lambda2));
   return difference < 1e-99;
}
/// Soft-thresholding (shrinkage) operator: moves x toward zero by lambda,
/// clamping to exactly zero on the interval [-lambda, lambda].
template <typename T>
static inline T softThrs(const T x, const T lambda) {
   if (x > lambda) return x - lambda;
   if (x < -lambda) return x + lambda;
   return 0;
};
/// Branch-free soft-thresholding: algebraically identical to softThrs via
/// x + 0.5*(|x-lambda| - |x+lambda|).
template <typename T>
static inline T fastSoftThrs(const T x, const T lambda) {
   const T below = abs<T>(x - lambda);
   const T above = abs<T>(x + lambda);
   return x + T(0.5) * (below - above);
};
/// Hard-thresholding operator: keeps x unchanged outside [-lambda, lambda],
/// zeroes it inside.
template <typename T>
static inline T hardThrs(const T x, const T lambda) {
   const bool outside = (x > lambda) || (x < -lambda);
   return outside ? x : 0;
};
/// Entropy kernel x*log(x), extended with x*log(x) := 0 for x near zero
/// and +infinity for (significantly) negative x.
template <typename T>
static inline T xlogx(const T x) {
   if (x < -1e-20) return INFINITY;
   if (x < 1e-20) return 0;
   return x*alt_log<T>(x);
}
/// Numerically stable softplus log(1 + exp(x)): returns 0 when exp(x) is
/// negligible (x < -30) and x itself when exp(x) dominates (x >= 30).
template <typename T>
static inline T logexp(const T x) {
   if (x < -30) return 0;
   if (x < 30) return alt_log<T>( T(1.0) + exp_alt<T>( x ) );
   return x;
}
/// Numerically stable log(1+exp(x)) (softplus): for x > 0 rewrites via the
/// identity log(1+e^x) = x + log(1+e^-x) to avoid overflow in exp.
/// NOTE(review): this uses log_alt<T> and a bare log() call, while logexp()
/// above uses alt_log<T> -- confirm which helper misc.h actually provides
/// and unify; the bare log() also computes in double regardless of T.
template <typename T>
static inline T logexp2(const T x) {
   return (x > 0) ? x + log_alt<T>(T(1.0)+ exp_alt<T>(-x)) :
      log( T(1.0) + exp_alt<T>( x ) );
}
/// Class Matrix
template<typename T> class Matrix {
friend class SpMatrix<T>;
public:
typedef T value_type;
typedef Vector<T> col_type;
typedef INTM index_type;
typedef Vector<T> element;
/// Constructor with existing data X of an m x n matrix
Matrix(T* X, INTM m, INTM n);
/// Constructor for a new m x n matrix
Matrix(INTM m, INTM n);
/// Empty constructor
Matrix();
/// Destructor
virtual ~Matrix();
/// Accessors
/// Number of rows
inline INTM m() const { return _m; };
/// Number of columns
inline INTM n() const { return _n; };
/// size
inline INTM size() const { return _n*_m; };
/// Return a modifiable reference to X(i,j)
inline T& operator()(const INTM i, const INTM j);
/// Return the value X(i,j)
inline T operator()(const INTM i, const INTM j) const;
/// Return a modifiable reference to X(i) (1D indexing)
inline T& operator[](const INTM index) { return _X[index]; };
/// Return the value X(i) (1D indexing)
inline T operator[](const INTM index) const { return _X[index]; };
/// Copy the column i into x
inline void copyCol(const INTM i, Vector<T>& x) const;
/// Copy the column i into x
inline void copyRow(const INTM i, Vector<T>& x) const;
inline void scalRow(const INTM i, const T s) const;
inline void copyToRow(const INTM i, const Vector<T>& x);
/// Copy the column i into x
inline void extract_rawCol(const INTM i, T* x) const;
/// Copy the column i into x
virtual void add_rawCol(const INTM i, T* DtXi, const T a) const;
/// Copy the column i into x
inline void getData(Vector<T>& data, const INTM i) const;
/// Reference the column i into the vector x
inline void refCol(INTM i, Vector<T>& x) const;
/// Reference the column i to i+n into the Matrix mat
inline void refSubMat(INTM i, INTM n, Matrix<T>& mat) const;
/// extract a sub-matrix of a symmetric matrix
inline void subMatrixSym(const Vector<INTM>& indices,
Matrix<T>& subMatrix) const;
/// reference a modifiable reference to the data, DANGEROUS
inline T* rawX() const { return _X; };
/// return a non-modifiable reference to the data
inline const T* X() const { return _X; };
/// make a copy of the matrix mat in the current matrix
inline void copy(const Matrix<T>& mat);
/// make a copy of the matrix mat in the current matrix
inline void copyTo(Matrix<T>& mat) const { mat.copy(*this); };
/// make a copy of the matrix mat in the current matrix
inline void copyRef(const Matrix<T>& mat);
/// Debugging function
/// Print the matrix to std::cout
inline void print(const string& name) const;
inline void dump(const string& name) const;
/// Modifiers
/// clean a dictionary matrix
inline void clean();
/// Resize the matrix
inline void resize(INTM m, INTM n, const bool set_zeros = true);
/// Change the data in the matrix
inline void setData(T* X, INTM m, INTM n);
/// Change the data in the matrix
inline void refData(const Matrix<T>& mat) {
this->setData(mat.rawX(),mat.m(),mat.n());
};
/// modify _m
inline void setm(const INTM m) { _m = m; }; //DANGEROUS
/// modify _n
inline void setn(const INTM n) { _n = n; }; //DANGEROUS
/// Set all the values to zero
inline void setZeros();
/// Set all the values to a scalar
inline void set(const T a);
/// Clear the matrix
inline void clear();
/// Put white Gaussian noise in the matrix
inline void setAleat();
/// set the matrix to the identity;
inline void eye();
/// Normalize all columns to unit l2 norm
inline void normalize();
/// Normalize all columns which l2 norm is greater than one.
inline void normalize2();
/// center the columns of the matrix
inline void center();
/// center the columns of the matrix
inline void center_rows();
/// center the columns of the matrix
inline void normalize_rows();
/// center the columns of the matrix and keep the center values
inline void center(Vector<T>& centers);
/// scale the matrix by the a
inline void scal(const T a);
/// make the matrix symmetric by copying the upper-right part
/// into the lower-left part
inline void fillSymmetric();
inline void fillSymmetric2();
/// change artificially the size of the matrix, DANGEROUS
inline void fakeSize(const INTM m, const INTM n) { _n = n; _m=m;};
/// whiten
inline void whiten(const INTM V);
/// whiten
inline void whiten(Vector<T>& mean, const bool pattern = false);
/// whiten
inline void whiten(Vector<T>& mean, const Vector<T>& mask);
/// whiten
inline void unwhiten(Vector<T>& mean, const bool pattern = false);
/// whiten
inline void sum_cols(Vector<T>& sum) const;
/// Analysis functions
/// Check wether the columns of the matrix are normalized or not
inline bool isNormalized() const;
/// return the 1D-index of the value of greatest magnitude
inline INTM fmax() const;
/// return the 1D-index of the value of greatest magnitude
inline T fmaxval() const;
/// return the 1D-index of the value of lowest magnitude
inline INTM fmin() const;
// Algebric operations
/// Transpose the current matrix and put the result in the matrix
/// trans
inline void transpose(Matrix<T>& trans) const;
/// A <- -A
inline void neg();
/// add one to the diagonal
inline void incrDiag();
inline void addDiag(const Vector<T>& diag);
inline void addDiag(const T diag);
inline void addToCols(const Vector<T>& diag);
inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
/// perform a rank one approximation uv' using the power method
/// u0 is an initial guess for u (can be empty).
inline void svdRankOne(const Vector<T>& u0,
Vector<T>& u, Vector<T>& v) const;
inline void singularValues(Vector<T>& u) const;
inline void svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const;
inline void svd2(Matrix<T>& U, Vector<T>& S, const int num = -1, const int method = 0) const;
inline void SymEig(Matrix<T>& U, Vector<T>& S) const;
inline void InvsqrtMat(Matrix<T>& out, const T lambda = 0) const;
inline void sqrtMat(Matrix<T>& out) const;
// inline void Inv(Matrix<T>& out) const;
/// find the eigenvector corresponding to the largest eigenvalue
/// when the current matrix is symmetric. u0 is the initial guess.
/// using two iterations of the power method
inline void eigLargestSymApprox(const Vector<T>& u0,
Vector<T>& u) const;
/// find the eigenvector corresponding to the eivenvalue with the
/// largest magnitude when the current matrix is symmetric,
/// using the power method. It
/// returns the eigenvalue. u0 is an initial guess for the
/// eigenvector.
inline T eigLargestMagnSym(const Vector<T>& u0,
Vector<T>& u) const;
/// returns the value of the eigenvalue with the largest magnitude
/// using the power iteration.
inline T eigLargestMagnSym() const;
/// inverse the matrix when it is symmetric
inline void invSym();
inline void invSymPos();
/// perform b = alpha*A'x + beta*b
inline void multTrans(const Vector<T>& x, Vector<T>& b,
const T alpha = 1.0, const T beta = 0.0) const;
/// perform b = alpha*A'x + beta*b
inline void multTrans(const Vector<T>& x, Vector<T>& b,
const Vector<bool>& active) const;
/// perform b = A'x, when x is sparse
template <typename I>
inline void multTrans(const SpVector<T,I>& x, Vector<T>& b, const T alpha =1.0, const T beta = 0.0) const;
/// perform b = alpha*A*x+beta*b
inline void mult(const Vector<T>& x, Vector<T>& b,
const T alpha = 1.0, const T beta = 0.0) const;
inline void mult_loop(const Vector<T>& x, Vector<T>& b) const;
/// perform b = alpha*A*x + beta*b, when x is sparse
template <typename I>
inline void mult(const SpVector<T,I>& x, Vector<T>& b,
const T alpha = 1.0, const T beta = 0.0) const;
template <typename I>
inline void mult_loop(const SpVector<T,I>& x, Vector<T>& b) const {
this->mult(x,b);
}
/// perform C = a*A*B + b*C, possibly transposing A or B.
inline void mult(const Matrix<T>& B, Matrix<T>& C,
const bool transA = false, const bool transB = false,
const T a = 1.0, const T b = 0.0) const;
/// perform C = a*B*A + b*C, possibly transposing A or B.
inline void multSwitch(const Matrix<T>& B, Matrix<T>& C,
const bool transA = false, const bool transB = false,
const T a = 1.0, const T b = 0.0) const;
/// perform C = A*B, when B is sparse
template <typename I>
inline void mult(const SpMatrix<T,I>& B, Matrix<T>& C, const bool transA = false,
const bool transB = false, const T a = 1.0,
const T b = 0.0) const;
/// mult by a diagonal matrix on the left
inline void multDiagLeft(const Vector<T>& diag);
/// mult by a diagonal matrix on the right
inline void multDiagRight(const Vector<T>& diag);
/// mult by a diagonal matrix on the right
inline void AddMultDiagRight(const Vector<T>& diag, Matrix<T>& mat);
/// C = A .* B, elementwise multiplication
inline void mult_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
inline void div_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
/// XtX = A'*A
inline void XtX(Matrix<T>& XtX) const;
/// XXt = A*A'
inline void XXt(Matrix<T>& XXt) const;
/// XXt = A*A' where A is an upper triangular matrix
inline void upperTriXXt(Matrix<T>& XXt,
const INTM L) const;
/// extract the diagonal
inline void diag(Vector<T>& d) const;
/// set the diagonal
inline void setDiag(const Vector<T>& d);
/// set the diagonal
inline void setDiag(const T val);
/// each element of the matrix is replaced by its exponential
inline void exp();
/// each element of the matrix is raised to the power a
inline void pow(const T a);
inline void Sqrt();
inline void Invsqrt();
inline void sqr();
/// return vec1'*A*vec2, where vec2 is sparse
template <typename I>
inline T quad(const Vector<T>& vec1, const SpVector<T,I>& vec2) const;
/// return vec1'*A*vec2, where vec2 is sparse
template <typename I>
inline void quad_mult(const Vector<T>& vec1, const SpVector<T,I>& vec2,
Vector<T>& y, const T a = 1.0, const T b = 0.0) const;
/// return vec'*A*vec when vec is sparse
template <typename I>
inline T quad(const SpVector<T,I>& vec) const;
/// add alpha*mat to the current matrix
inline void add(const Matrix<T>& mat, const T alpha = 1.0);
/// add alpha*mat to the current matrix
inline void add_scal(const Matrix<T>& mat, const T alpha = 1.0, const T beta = 1.0);
/// add alpha to the current matrix
inline void add(const T alpha);
/// return the inner product between the current matrix and mat
inline T dot(const Matrix<T>& mat) const;
/// substract the matrix mat to the current matrix
inline void sub(const Matrix<T>& mat);
/// inverse the elements of the matrix
inline void inv_elem();
/// inverse the elements of the matrix
inline void inv() { this->inv_elem(); };
/// return the trace of the matrix
inline T trace() const;
/// compute the sum of the magnitude of the matrix values
inline T asum() const;
/// compute the sum of the magnitude of the matrix values
inline T sum() const;
/// return ||A||_F
inline T normF() const;
/// return the mean of the matrix entries
inline T mean() const;
/// return the mean of the absolute values of the matrix entries
inline T abs_mean() const;
/// whiten
/// return ||A||_F^2
inline T normFsq() const;
/// return ||A||_F^2
inline T nrm2sq() const { return this->normFsq(); };
/// return ||At||_{inf,2} (max of l2 norm of the columns)
inline T norm_inf_2_col() const;
/// return ||At||_{1,2} (max of l2 norm of the columns)
inline T norm_1_2_col() const;
/// returns the l2 norms of the columns
inline void norm_2_cols(Vector<T>& norms) const;
/// returns the l2 norms of the columns
inline void norm_2_rows(Vector<T>& norms) const;
/// returns the linf norms of the columns
inline void norm_inf_cols(Vector<T>& norms) const;
/// returns the linf norms of the columns
inline void norm_inf_rows(Vector<T>& norms) const;
/// returns the l1 norms of the rows
inline void norm_l1_rows(Vector<T>& norms) const;
/// returns the sum of each column
inline void get_sum_cols(Vector<T>& sum) const;
/// returns the column-wise dot products between the current matrix and mat
inline void dot_col(const Matrix<T>& mat, Vector<T>& dots) const;
/// returns the l2 norms ^2 of the columns
inline void norm_2sq_cols(Vector<T>& norms) const;
/// returns the squared l2 norms of the rows
inline void norm_2sq_rows(Vector<T>& norms) const;
inline void thrsmax(const T nu);
inline void thrsmin(const T nu);
inline void thrsabsmin(const T nu);
/// perform soft-thresholding of the matrix, with the threshold nu
inline void softThrshold(const T nu);
inline void fastSoftThrshold(const T nu);
inline void fastSoftThrshold(Matrix<T>& output, const T nu) const;
inline void hardThrshold(const T nu);
/// perform soft-thresholding of the matrix, with the threshold nu
inline void thrsPos();
/// perform A <- A + alpha*vec1*vec2'
inline void rank1Update(const Vector<T>& vec1, const Vector<T>& vec2,
const T alpha = 1.0);
/// perform A <- A + alpha*vec1*vec2', when vec1 is sparse
template <typename I>
inline void rank1Update(const SpVector<T,I>& vec1, const Vector<T>& vec2,
const T alpha = 1.0);
/// perform A <- A + alpha*vec1*vec2', when vec2 is sparse
template <typename I>
inline void rank1Update(const Vector<T>& vec1, const SpVector<T,I>& vec2,
const T alpha = 1.0);
template <typename I>
inline void rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b,
const SpVector<T,I>& vec2,
const T alpha = 1.0);
/// perform A <- A + alpha*vec*vec', when vec2 is sparse
template <typename I>
inline void rank1Update(const SpVector<T,I>& vec,
const T alpha = 1.0);
/// perform A <- A + alpha*vec*vec', when vec2 is sparse
template <typename I>
inline void rank1Update(const SpVector<T,I>& vec, const SpVector<T,I>& vec2,
const T alpha = 1.0);
/// Compute the mean of the columns
inline void meanCol(Vector<T>& mean) const;
/// Compute the mean of the rows
inline void meanRow(Vector<T>& mean) const;
/// fill the matrix with the row given
inline void fillRow(const Vector<T>& row);
/// fill the matrix with the row given
inline void extractRow(const INTM i, Vector<T>& row) const;
inline void setRow(const INTM i, const Vector<T>& row);
inline void addRow(const INTM i, const Vector<T>& row, const T a=1.0);
/// compute x, such that b = Ax, WARNING this function needs to be u
/// updated
inline void conjugateGradient(const Vector<T>& b, Vector<T>& x,
const T tol = 1e-4, const int = 4) const;
/// compute x, such that b = Ax, WARNING this function needs to be u
/// updated, the temporary vectors are given.
inline void drop(char* fileName) const;
/// compute a Nadaraya Watson estimator
inline void NadarayaWatson(const Vector<INTM>& ind, const T sigma);
/// performs soft-thresholding of the vector
inline void blockThrshold(const T nu, const INTM sizeGroup);
/// performs sparse projections of the columns
inline void sparseProject(Matrix<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0,
const T lambda2 = 0, const T lambda3 = 0, const bool pos = false, const int numThreads=-1);
inline void transformFilter();
/// Conversion
/// make a sparse copy of the current matrix
inline void toSparse(SpMatrix<T>& matrix) const;
/// make a sparse copy of the current matrix
inline void toSparseTrans(SpMatrix<T>& matrixTrans);
/// make a reference of the matrix to a vector vec
inline void toVect(Vector<T>& vec) const;
/// Accessor
inline INTM V() const { return 1;};
/// extract the rows of a matrix corresponding to a binary mask
inline void copyMask(Matrix<T>& out, Vector<bool>& mask) const;
typedef Vector<T> col;
static const bool is_sparse = false;
protected:
/// Forbid lazy copies
explicit Matrix<T>(const Matrix<T>& matrix);
/// Forbid lazy copies
Matrix<T>& operator=(const Matrix<T>& matrix);
/// is the data allocation external or not
bool _externAlloc;
/// pointer to the data
T* _X;
/// number of rows
INTM _m;
/// number of columns
INTM _n;
};
/// Class for dense vector (contiguous array of T, possibly referencing
/// externally-owned memory when constructed from a raw pointer)
template<typename T> class Vector {
friend class SpMatrix<T>;
friend class Matrix<T>;
friend class SpVector<T>;
public:
typedef T value_type;
typedef T element;
/// Empty constructor
Vector();
/// Constructor. Create a new vector of size n
Vector(INTM n);
/// Constructor with existing data (X is referenced, not copied)
Vector(T* X, INTM n);
/// Copy constructor
explicit Vector<T>(const Vector<T>& vec);
/// Destructor
virtual ~Vector();
/// Accessors
/// Print the vector to std::cout
inline void print(const char* name) const;
/// Dump the vector to the file given by name
inline void dump(const string& name) const;
/// returns the index of the largest value
inline INTM max() const;
/// returns the index of the minimum value
inline INTM min() const;
/// returns the maximum value
inline T maxval() const;
/// returns the minimum value
inline T minval() const;
/// returns the index of the value with largest magnitude
inline INTM fmax() const;
/// returns the index of the value with smallest magnitude
inline INTM fmin() const;
/// returns the maximum magnitude
inline T fmaxval() const;
/// returns the minimum magnitude
inline T fminval() const;
/// returns a reference to X[index]
inline T& operator[](const INTM index);
/// returns X[index]
inline T operator[](const INTM index) const;
/// make a copy of x
inline void copy(const Vector<T>& x);
/// reference the data of x (no copy)
inline void copyRef(const Vector<T>& x);
/// returns the size of the vector
inline int n() const { return _n; };
/// returns the size of the vector
inline int size() const { return _n; };
/// returns a modifiable reference of the data, DANGEROUS
inline T* rawX() const { return _X; };
/// change artificially the size of the vector, DANGEROUS
inline void fakeSize(const INTM n) { _n = n; };
/// generate logarithmically spaced values
inline void logspace(const INTM n, const T a, const T b);
/// returns the number of nonzero entries
inline INTM nnz() const;
/// Modifiers
/// Set all values to zero
inline void setZeros();
/// resize the vector
inline void resize(const INTM n, const bool set_zeros = true);
/// change the data of the vector (X is referenced, not copied)
inline void setPointer(T* X, const INTM n);
inline void setData(T* X, const INTM n) { this->setPointer(X,n); };
inline void refData(const Vector<T>& vec) { this->setPointer(vec.rawX(),vec.n()); };
/// reference the sub-vector [i, i+n) into mat (no copy)
inline void refSubVec(INTM i, INTM n, Vector<T>& mat) const { mat.setData(_X+i,n); };
//inline void print(const char* name) const;
inline void print(const string& name) const;
/// put a random permutation of size n (for integral vectors)
inline void randperm(int n);
/// put random integers (for integral vectors)
inline void randi(int n);
/// put random values in the vector (White Gaussian Noise)
inline void setAleat();
/// clear the vector
inline void clear();
/// performs soft-thresholding of the vector
inline void softThrshold(const T nu);
inline void fastSoftThrshold(const T nu);
inline void fastSoftThrshold(Vector<T>& out, const T nu) const;
inline void softThrsholdScal(Vector<T>& out, const T nu, const T s);
/// performs hard-thresholding of the vector
inline void hardThrshold(const T nu);
/// elementwise max/min against the scalar nu
inline void thrsmax(const T nu);
inline void thrsmin(const T nu);
inline void thrsabsmin(const T nu);
/// performs thresholding of the vector
inline void thrshold(const T nu);
/// keeps only the positive part of the vector
inline void thrsPos();
/// set each value of the vector to val
inline void set(const T val);
inline void setn(const INTM n) { _n = n; }; //DANGEROUS
inline bool alltrue() const;
inline bool allfalse() const;
/// Algebric operations
/// returns ||A||_2
inline T nrm2() const;
/// returns ||A||_2^2
inline T nrm2sq() const;
/// returns A'x
inline T dot(const Vector<T>& x) const;
/// returns A'x, when x is sparse
template <typename I>
inline T dot(const SpVector<T,I>& x) const;
/// A <- A + a*x
inline void add(const Vector<T>& x, const T a = 1.0);
/// A <- A + a*x, when x is sparse
template <typename I>
inline void add(const SpVector<T,I>& x, const T a = 1.0);
/// adds a to each value in the vector
inline void add(const T a);
/// A <- b*A + a*x
inline void add_scal(const Vector<T>& x, const T a = 1.0, const T b = 0);
/// A <- b*A + a*x, when x is sparse
template <typename I>
inline void add_scal(const SpVector<T,I>& x, const T a = 1.0, const T b = 0);
/// A <- A - x
inline void sub(const Vector<T>& x);
/// A <- A - x, when x is sparse
template <typename I>
inline void sub(const SpVector<T,I>& x);
/// A <- A ./ x
inline void div(const Vector<T>& x);
/// A <- x ./ y
inline void div(const Vector<T>& x, const Vector<T>& y);
/// A <- x .^ 2
inline void sqr(const Vector<T>& x);
/// A <- A .^ 2
inline void sqr();
/// A <- sqrt(x)
inline void Sqrt(const Vector<T>& x);
/// A <- sqrt(A)
inline void Sqrt();
/// A <- 1 ./ sqrt(x)
inline void Invsqrt(const Vector<T>& x);
/// A <- 1 ./ sqrt(A)
inline void Invsqrt();
/// A <- 1./x
inline void inv(const Vector<T>& x);
/// A <- 1./A
inline void inv();
/// A <- x .* y
inline void mult(const Vector<T>& x, const Vector<T>& y);
/// C <- A .* B, elementwise multiplication
inline void mult_elementWise(const Vector<T>& B, Vector<T>& C) const { C.mult(*this,B); };
/// normalize the vector to unit l2 norm
inline void normalize();
/// normalize the vector only if its l2 norm exceeds thrs
inline void normalize2(const T thrs = 1.0);
/// whiten
inline void whiten(Vector<T>& mean, const bool pattern = false);
/// whiten
inline void whiten(Vector<T>& mean, const
Vector<T>& mask);
/// whiten
inline void whiten(const INTM V);
/// return the mean of the entries
inline T mean() const;
/// return the mean of the absolute values of the entries
inline T abs_mean() const;
/// return the mean weighted by qi
inline T mean_non_uniform(const Vector<T>& qi) const;
/// return the empirical standard deviation
inline T std();
/// compute the Kullback-Leibler divergence
inline T KL(const Vector<T>& X);
/// undo the whitening
inline void unwhiten(Vector<T>& mean, const bool pattern = false);
/// scale the vector by a
inline void scal(const T a);
/// A <- -A
inline void neg();
/// replace each value by its exponential
inline void exp();
/// replace each value by its logarithm
inline void log();
/// replace each value by its absolute value
inline void abs_vec();
/// replace each value by log(1+exp(.)) (softplus-style transform)
inline void logexp();
/// softmax-style transform; returns the value for class y
inline T softmax(const int y);
inline T logsumexp();
/// computes the sum of the magnitudes of the vector
inline T asum() const;
/// number of zero entries
inline T lzero() const;
/// compute the sum of the absolute differences of consecutive entries
inline T afused() const;
/// returns the sum of the vector
inline T sum() const;
/// puts in signs, the sign of each point in the vector
inline void sign(Vector<T>& signs) const;
/// projects the vector onto the l1 ball of radius thrs,
/// returns true if the returned vector is null
inline void l1project(Vector<T>& out, const T thrs, const bool simplex = false) const;
inline void l1project_weighted(Vector<T>& out, const Vector<T>& weights, const T thrs, const bool residual = false) const;
inline void l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos = false,
const int mode = 1);
inline void sparseProject(Vector<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0,
const T lambda2 = 0, const T lambda3 = 0, const bool pos = false);
inline void project_sft(const Vector<int>& labels, const int clas);
inline void project_sft_binary(const Vector<T>& labels);
/// projects the vector onto the elastic-net-style ball
/// (combined l1/l2 constraint of radius thrs),
/// returns true if the returned vector is null
inline void l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos = false) const;
inline void fusedProject(Vector<T>& out, const T lambda1, const T lambda2, const int itermax);
inline void fusedProjectHomotopy(Vector<T>& out, const T lambda1,const T lambda2,const T lambda3 = 0,
const bool penalty = true);
/// Sorting routines
/// sort the vector
inline void sort(Vector<T>& out, const bool mode) const;
/// sort the vector
inline void sort(const bool mode);
//// sort the vector, also returning the permutation in key
inline void sort2(Vector<T>& out, Vector<INTM>& key, const bool mode) const;
/// sort the vector, also returning the permutation in key
inline void sort2(Vector<INTM>& key, const bool mode);
/// apply a Bayer pattern mask (image processing helper)
inline void applyBayerPattern(const int offset);
/// Conversion
/// make a sparse copy
inline void toSparse(SpVector<T>& vec) const;
/// extract the entries corresponding to a binary mask
inline void copyMask(Vector<T>& out, Vector<bool>& mask) const;
inline void getIndices(Vector<int>& ind) const { }; // irrelevant for dense vectors
template <typename I>
inline void refIndices(Vector<I>& ind) const { }; // irrelevant for dense vectors
private:
/// = operator,
Vector<T>& operator=(const Vector<T>& vec);
/// if the data has been externally allocated
bool _externAlloc;
/// data
T* _X;
/// size of the vector
INTM _n;
};
/// Sparse Matrix class, CSC format:
/// _v holds the non-zero values, _r their row indices, and
/// _pB[j]/_pE[j] delimit the entries of column j in _v/_r.
template<typename T, typename I> class SpMatrix {
friend class Matrix<T>;
friend class SpVector<T,I>;
public:
typedef T value_type;
typedef SpVector<T,I> col_type;
typedef I index_type;
/// Constructor, CSC format, existing data (referenced, not copied)
SpMatrix(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax);
/// Constructor, new m x n matrix, with at most nzmax non-zeros values
SpMatrix(I m, I n, I nzmax);
/// Empty constructor
SpMatrix();
/// Destructor
~SpMatrix();
/// Accessors
/// reference the column i into vec
inline void refCol(I i, SpVector<T,I>& vec) const;
/// returns pB[i]
inline I pB(const I i) const { return _pB[i]; };
/// returns r[i]
inline I r(const I i) const { return _r[i]; };
/// returns v[i]
inline T v(const I i) const { return _v[i]; };
/// returns the maximum number of non-zero elements
inline I nzmax() const { return _nzmax; };
/// returns the number of columns
inline I n() const { return _n; };
/// returns the number of rows
inline I m() const { return _m; };
/// returns 1 (single view)
inline I V() const { return 1; };
/// returns X[index]
inline T operator[](const I index) const;
void getData(Vector<T>& data, const I index) const;
void setData(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax);
/// print the sparse matrix
inline void print(const string& name) const;
/// compute the sum of the magnitudes of the matrix elements
inline T asum() const;
/// compute the squared Frobenius norm
inline T normFsq() const;
/// Direct access to _pB
inline I* pB() const { return _pB; };
/// Direct access to _pE
inline I* pE() const { return _pE; };
/// Direct access to _r
inline I* r() const { return _r; };
/// Direct access to _v
inline T* v() const { return _v; };
/// number of nonzeros elements
inline I nnz() const { return _pB[_n]; };
inline void add_direct(const SpMatrix<T,I>& mat, const T a);
inline void copy_direct(const SpMatrix<T,I>& mat);
inline T dot_direct(const SpMatrix<T,I>& mat) const;
/// Modifiers
/// clear the matrix
inline void clear();
/// resize the matrix
inline void resize(const I m, const I n, const I nzmax);
/// scale the matrix by a
/// NOTE(review): declared const although it scales the values -- confirm
inline void scal(const T a) const;
/// mean of the absolute values of the non-zero entries
inline T abs_mean() const;
/// Algebraic operations
/// aat <- A*A'
inline void AAt(Matrix<T>& aat) const;
/// aat <- A(:,indices)*A(:,indices)'
inline void AAt(Matrix<T>& aat, const Vector<I>& indices) const;
/// aat <- sum_i w_i A(:,i)*A(:,i)'
inline void wAAt(const Vector<T>& w, Matrix<T>& aat) const;
/// XAt <- X*A'
inline void XAt(const Matrix<T>& X, Matrix<T>& XAt) const;
/// XAt <- X(:,indices)*A(:,indices)'
inline void XAt(const Matrix<T>& X, Matrix<T>& XAt,
const Vector<I>& indices) const;
/// XAt <- sum_i w_i X(:,i)*A(:,i)'
inline void wXAt( const Vector<T>& w, const Matrix<T>& X,
Matrix<T>& XAt, const int numthreads=-1) const;
/// XtX <- A'*A
inline void XtX(Matrix<T>& XtX) const;
/// y <- alpha*A'*x + beta*y
inline void multTrans(const Vector<T>& x, Vector<T>& y,
const T alpha = 1.0, const T beta = 0.0) const;
inline void multTrans(const SpVector<T,I>& x, Vector<T>& y,
const T alpha = 1.0, const T beta = 0.0) const;
/// perform b = alpha*A*x + beta*b, when x is sparse
inline void mult(const SpVector<T,I>& x, Vector<T>& b,
const T alpha = 1.0, const T beta = 0.0) const;
/// perform b = alpha*A*x + beta*b
inline void mult(const Vector<T>& x, Vector<T>& b,
const T alpha = 1.0, const T beta = 0.0) const;
/// perform C = a*A*B + b*C, possibly transposing A or B.
inline void mult(const Matrix<T>& B, Matrix<T>& C,
const bool transA = false, const bool transB = false,
const T a = 1.0, const T b = 0.0) const;
/// perform C = a*B*A + b*C, possibly transposing A or B.
inline void multSwitch(const Matrix<T>& B, Matrix<T>& C,
const bool transA = false, const bool transB = false,
const T a = 1.0, const T b = 0.0) const;
/// perform C = a*A*B + b*C, when B is sparse
inline void mult(const SpMatrix<T,I>& B, Matrix<T>& C, const bool transA = false,
const bool transB = false, const T a = 1.0,
const T b = 0.0) const;
/// make a dense copy of the current matrix in mat
inline void copyTo(Matrix<T>& mat) const { this->toFull(mat); };
/// dot product;
inline T dot(const Matrix<T>& x) const;
inline void copyRow(const I i, Vector<T>& x) const;
inline void sum_cols(Vector<T>& sum) const;
inline void copy(const SpMatrix<T,I>& mat);
/// Conversions
/// copy the sparse matrix into a dense matrix
inline void toFull(Matrix<T>& matrix) const;
/// copy the sparse matrix into a dense transposed matrix
inline void toFullTrans(Matrix<T>& matrix) const;
/// use the data from v, r for _v, _r
inline void convert(const Matrix<T>&v, const Matrix<I>& r,
const I K);
/// use the data from v, r for _v, _r
inline void convert2(const Matrix<T>&v, const Vector<I>& r,
const I K);
/// normalize the columns to unit l2 norm
inline void normalize();
/// normalize the rows to unit l2 norm
inline void normalize_rows();
/// returns the l2 norms ^2 of the columns
inline void norm_2sq_cols(Vector<T>& norms) const;
/// returns the l0 norms of the columns
inline void norm_0_cols(Vector<T>& norms) const;
/// returns the l1 norms of the columns
inline void norm_1_cols(Vector<T>& norms) const;
inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
inline void addVecToColsWeighted(const Vector<T>& diag, const T* weights, const T a = 1.0);
typedef SpVector<T,I> col;
static const bool is_sparse = true;
private:
/// forbid copy constructor
explicit SpMatrix(const SpMatrix<T,I>& matrix);
SpMatrix<T,I>& operator=(const SpMatrix<T,I>& matrix);
/// if the data has been externally allocated
bool _externAlloc;
/// data
T* _v;
/// row indices
I* _r;
/// indices of the beginning of columns
I* _pB;
/// indices of the end of columns
I* _pE;
/// number of rows
I _m;
/// number of columns
I _n;
/// number of non-zero values
I _nzmax;
};
/// Sparse vector class: _v holds the non-zero values and _r their
/// indices; _L is the logical length, _nzmax the capacity.
template <typename T, typename I> class SpVector {
friend class Matrix<T>;
friend class SpMatrix<T,I>;
friend class Vector<T>;
public:
typedef T value_type;
/// Constructor, of the sparse vector of size L.
SpVector(T* v, I* r, I L, I nzmax);
/// Constructor, allocates nzmax slots
SpVector(I nzmax);
/// Empty constructor
SpVector();
/// Destructor
~SpVector();
/// Accessors
/// returns the maximum number of non-zero elements
/// NOTE(review): return type is T rather than I -- confirm intended
inline T nzmax() const { return _nzmax; };
/// returns the length of the vector
inline T length() const { return _L; };
/// computes the sum of the magnitude of the elements
inline T asum() const;
/// computes the l2 norm ^2 of the vector
inline T nrm2sq() const;
/// computes the l2 norm of the vector
inline T nrm2() const;
/// computes the linf norm of the vector
inline T fmaxval() const;
/// print the vector to std::cerr
inline void print(const string& name) const;
/// creates a reference on the indices
inline void refIndices(Vector<I>& indices) const;
/// creates a reference on the vector val
inline void refVal(Vector<T>& val) const;
/// access table r
inline I r(const I i) const { return _r[i]; };
/// access table v
inline T v(const I i) const { return _v[i]; };
inline T* rawX() const { return _v; };
inline I* rawR() const { return _r; };
///
inline I L() const { return _L; };
///
inline void setL(const I L) { _L=L; };
/// a <- a.^2
inline void sqr();
/// dot product
inline T dot(const SpVector<T,I>& vec) const;
/// dot product
inline T dot(const Vector<T>& vec) const;
/// scale the vector by a
inline void scal(const T a);
/// Modifiers
/// clears the vector
inline void clear();
/// resizes the vector
inline void resize(const I nzmax);
/// convert the vector to an m x n sparse matrix
void inline toSpMatrix(SpMatrix<T,I>& out,
const I m, const I n) const;
/// convert the vector to a dense vector
void inline toFull(Vector<T>& out) const;
inline void getIndices(Vector<int>& ind) const;
private:
/// forbids lazy copies
explicit SpVector(const SpVector<T,I>& vector);
SpVector<T,I>& operator=(const SpVector<T,I>& vector);
/// external allocation
bool _externAlloc;
/// data
T* _v;
/// indices
I* _r;
/// length
I _L;
/// maximum number of nonzeros elements
I _nzmax;
};
/// Vector supporting lazily-applied scalar updates of the form
/// x <- b*(x + a*z): add_scal only records the coefficients
/// (_stats1/_stats2) and entries of _x are brought up to date on demand
/// via update(), using the date (_dates) of their last refresh.
template<typename T, typename I> class LazyVector {
public:
LazyVector(Vector<T>& x, const Vector<T>& z, const int n) : _x(x), _z(z), _n(n+1), _p(x.n()) {
_current_time=0;
_dates.resize(_p);
_dates.setZeros();
_stats1.resize(n+1);
_stats2.resize(n+1);
_stats1[0]=T(1.0);
_stats2[0]=0;
};
/// bring every entry of _x up to date and reset the time counter
void inline update() {
for (int ii=0; ii<_p; ++ii) {
update(ii);
}
_current_time=0;
_dates.setZeros();
};
/// bring entry ind of _x up to date (no-op if already current)
void inline update(const I ind) {
const int last_time=_dates[ind];
if (last_time != _current_time) {
_x[ind] = (_stats1[_current_time]/_stats1[last_time])*_x[ind] + _stats1[_current_time]*(_stats2[_current_time]-_stats2[last_time])*_z[ind];
_dates[ind]=_current_time;
}
};
/// bring the listed entries of _x up to date
void inline update(const Vector<I>& indices) {
const int p = indices.n();
for (int ii=0; ii<p; ++ii) {
update(indices[ii]);
}
};
void inline add_scal(const T a, const T b) { // performs x <- a(x - b z); NOTE(review): recurrence appears to realize x <- b*(x + a*z) -- confirm
if (_current_time == _n)
update();
_current_time++;
_stats2[_current_time]=_stats2[_current_time-1] + a/_stats1[_current_time-1];
_stats1[_current_time]=_stats1[_current_time-1]*b;
if (_stats1[_current_time] < 1e-7)
update(); // to prevent numerical stability problems
};
private:
Vector<T>& _x;
const Vector<T>& _z;
const int _n;
const int _p;
Vector<T> _stats1, _stats2;
Vector<int> _dates;
int _current_time;
};
/// Variant of LazyVector with two direction vectors z1 and z2:
/// add_scal(a,b,c) records coefficients so that entries are lazily
/// updated as x <- c*x + (terms in a*z1 and b*z2) when touched.
/// NOTE(review): unlike LazyVector, add_scal here divides by
/// _stats1[_current_time] (post-update) and uses a 1e-6 threshold --
/// presumed intentional, confirm against the calling algorithm.
template<typename T, typename I> class DoubleLazyVector {
public:
DoubleLazyVector(Vector<T>& x, const Vector<T>& z1, const Vector<T>& z2, const int n) : _x(x), _z1(z1), _z2(z2), _n(n+1), _p(x.n()) {
_current_time=0;
_dates.resize(_p);
_dates.setZeros();
_stats1.resize(n+1);
_stats2.resize(n+1);
_stats3.resize(n+1);
_stats1[0]=T(1.0);
_stats2[0]=0;
_stats3[0]=0;
};
/// bring every entry of _x up to date and reset the time counter
void inline update() {
for (int ii=0; ii<_p; ++ii) {
update(ii);
}
_current_time=0;
_dates.setZeros();
};
/// bring entry ind of _x up to date (no-op if already current)
void inline update(const I ind) {
const int last_time=_dates[ind];
if (last_time != _current_time) {
_x[ind] = _stats1[_current_time]* ( _x[ind]/_stats1[last_time] + (_stats2[_current_time]-_stats2[last_time])*_z1[ind] + (_stats3[_current_time]-_stats3[last_time])*_z2[ind]);
_dates[ind]=_current_time;
}
};
/// bring the listed entries of _x up to date
void inline update(const Vector<I>& indices) {
const int p = indices.n();
for (int ii=0; ii<p; ++ii) {
update(indices[ii]);
}
};
/// record one lazy update step with coefficients a, b, c
void inline add_scal(const T a, const T b, const T c) {
if (_current_time == _n)
update();
_current_time++;
_stats1[_current_time]=_stats1[_current_time-1]*c;
_stats2[_current_time]=_stats2[_current_time-1] + a/_stats1[_current_time];
_stats3[_current_time]=_stats3[_current_time-1] + b/_stats1[_current_time];
if (_stats1[_current_time] < 1e-6)
update(); // to prevent numerical stability problems
};
private:
Vector<T>& _x;
const Vector<T>& _z1;
const Vector<T>& _z2;
const int _n;
const int _p;
Vector<T> _stats1, _stats2, _stats3;
Vector<int> _dates;
int _current_time;
};
/* ************************************
* Implementation of the class Matrix
* ************************************/
/// Constructor with existing data X of an m x n matrix (column-major);
/// X is referenced, not copied, and will not be freed by the destructor
template <typename T> Matrix<T>::Matrix(T* X, INTM m, INTM n) :
_externAlloc(true), _X(X), _m(m), _n(n) { };
/// Constructor for a new m x n matrix; allocates n*m entries
/// (uninitialized). Allocation is wrapped in an omp critical section,
/// presumably to serialize the allocator across threads -- TODO confirm.
template <typename T> Matrix<T>::Matrix(INTM m, INTM n) :
_externAlloc(false), _m(m), _n(n) {
#pragma omp critical
{
_X= new T[_n*_m];
}
};
/// Empty constructor: 0 x 0 matrix with no data
template <typename T> Matrix<T>::Matrix() :
_externAlloc(false), _X(NULL), _m(0), _n(0) { };
/// Destructor: frees the data only if it was internally allocated
template <typename T> Matrix<T>::~Matrix() {
clear();
};
/// Return a modifiable reference to X(i,j) (column-major storage)
template <typename T> inline T& Matrix<T>::operator()(const INTM i, const INTM j) {
return _X[j*_m+i];
};
/// Return the value X(i,j) (column-major storage)
template <typename T> inline T Matrix<T>::operator()(const INTM i, const INTM j) const {
return _X[j*_m+i];
};
/// Print the matrix: header goes to std::cerr, values to stdout via printf
template <typename T> inline void Matrix<T>::print(const string& name) const {
std::cerr << name << std::endl;
std::cerr << _m << " x " << _n << std::endl;
for (INTM i = 0; i<_m; ++i) {
for (INTM j = 0; j<_n; ++j) {
printf("%10.5g ",static_cast<double>(_X[j*_m+i]));
}
printf("\n ");
}
printf("\n ");
};
/// Dump the matrix to the text file given by name, with 20-digit precision
template <typename T> inline void Matrix<T>::dump(const string& name) const {
ofstream f;
f.open(name);
f.precision(20);
std::cerr << name << std::endl;
f << _m << " x " << _n << std::endl;
for (INTM i = 0; i<_m; ++i) {
for (INTM j = 0; j<_n; ++j) {
f << static_cast<double>(_X[j*_m+i]) << " ";
}
f << std::endl;
}
f << std::endl;
f.close();
};
/// Copy the column i into x (x is resized to _m)
template <typename T> inline void Matrix<T>::copyCol(const INTM i, Vector<T>& x) const {
assert(i >= 0 && i<_n);
x.resize(_m);
cblas_copy<T>(_m,_X+i*_m,1,x._X,1);
};
/// Copy the row i into x (x is resized to _n; stride _m walks the row)
template <typename T> inline void Matrix<T>::copyRow(const INTM i, Vector<T>& x) const {
assert(i >= 0 && i<_m);
x.resize(_n);
cblas_copy<T>(_n,_X+i,_m,x._X,1);
};
/// Scale the row i by s
/// NOTE(review): declared const yet writes _X (legal because _X is a
/// pointer member, but logically mutating) -- confirm intended
template <typename T> inline void Matrix<T>::scalRow(const INTM i, const T s) const {
assert(i >= 0 && i<_m);
for (int ii=0; ii<_n; ++ii)
_X[i+ii*_m] *= s;
};
/// Copy the vector x into the row i
template <typename T> inline void Matrix<T>::copyToRow(const INTM i, const Vector<T>& x) {
assert(i >= 0 && i<_m);
cblas_copy<T>(_n,x._X,1,_X+i,_m);
};
/// Copy the column i into the raw buffer x (caller guarantees room for _m)
template <typename T> inline void Matrix<T>::extract_rawCol(const INTM i, T* x) const {
assert(i >= 0 && i<_n);
cblas_copy<T>(_m,_X+i*_m,1,x,1);
};
/// x <- x + a * column(i), x being a raw buffer of at least _m entries
template <typename T> inline void Matrix<T>::add_rawCol(const INTM i, T* x, const T a) const {
assert(i >= 0 && i<_n);
cblas_axpy<T>(_m,a,_X+i*_m,1,x,1);
};
/// Copy the column i into x (alias for copyCol)
template <typename T> inline void Matrix<T>::getData(Vector<T>& x, const INTM i) const {
this->copyCol(i,x);
};
/// Reference the column i into the vector x (no copy; x must not free it)
template <typename T> inline void Matrix<T>::refCol(INTM i, Vector<T>& x) const {
assert(i >= 0 && i<_n);
x.clear();
x._X=_X+i*_m;
x._n=_m;
x._externAlloc=true;
};
/// Reference the columns i to i+n-1 into the Matrix mat (no copy)
template <typename T> inline void Matrix<T>::refSubMat(INTM i, INTM n, Matrix<T>& mat) const {
mat.setData(_X+i*_m,_m,n);
}
/// Check whether every column of the matrix has unit l2 norm
/// (within a tolerance of 1e-6)
template <typename T> inline bool Matrix<T>::isNormalized() const {
for (INTM j = 0; j<_n; ++j) {
const T colNorm=cblas_nrm2<T>(_m,_X+_m*j,1);
if (fabs(colNorm - 1.0) > 1e-6)
return false;
}
return true;
};
/// clean a dictionary matrix: normalize the columns, then replace any
/// column that nearly duplicates an earlier one (Gram entry > 0.99)
/// with a random unit-norm column.
/// NOTE(review): G is computed once and not refreshed after a column is
/// replaced, so a new random column is not re-checked -- confirm intended.
/// Indexing prG[i*_n+j] is valid because G is the square _n x _n Gram matrix.
template <typename T>
inline void Matrix<T>::clean() {
this->normalize();
Matrix<T> G;
this->XtX(G);
T* prG = G._X;
/// scan the strictly upper triangle of the Gram matrix
for (INTM i = 0; i<_n; ++i) {
for (INTM j = i+1; j<_n; ++j) {
if (prG[i*_n+j] > 0.99) {
// remove nasty column j and put random values inside
Vector<T> col;
this->refCol(j,col);
col.setAleat();
col.normalize();
}
}
}
};
/// return the 1D-index of the value of greatest magnitude
template <typename T> inline INTM Matrix<T>::fmax() const {
return cblas_iamax<T>(_n*_m,_X,1);
};
/// return the value of greatest magnitude (sign preserved)
template <typename T> inline T Matrix<T>::fmaxval() const {
return _X[cblas_iamax<T>(_n*_m,_X,1)];
};
/// return the 1D-index of the value of lowest magnitude
template <typename T> inline INTM Matrix<T>::fmin() const {
return cblas_iamin<T>(_n*_m,_X,1);
};
/// extract the L x L sub-matrix indexed by indices (rows and columns)
/// of a symmetric matrix; only the lower triangle is gathered, then
/// mirrored via fillSymmetric.
/// NOTE(review): the source index rawInd[i]*_n+rawInd[j] uses _n as the
/// leading dimension, which is only correct when _m == _n (i.e. the
/// matrix is square/symmetric, as the name implies) -- confirm callers.
template <typename T> inline void Matrix<T>::subMatrixSym(
const Vector<INTM>& indices, Matrix<T>& subMatrix) const {
INTM L = indices.n();
subMatrix.resize(L,L);
T* out = subMatrix._X;
INTM* rawInd = indices.rawX();
for (INTM i = 0; i<L; ++i)
for (INTM j = 0; j<=i; ++j)
out[i*L+j]=_X[rawInd[i]*_n+rawInd[j]];
subMatrix.fillSymmetric();
};
/// Resize the matrix to m x n, discarding the previous contents.
/// NOTE(review): when the size is unchanged the function returns early
/// and keeps the existing contents -- set_zeros is NOT honored in that
/// case; callers relying on zeroing must call setZeros themselves.
template <typename T> inline void Matrix<T>::resize(INTM m, INTM n, const bool set_zeros) {
if (_n==n && _m==m) return;
clear();
_n=n;
_m=m;
_externAlloc=false;
#pragma omp critical
{
_X=new T[_n*_m];
}
if (set_zeros)
setZeros();
};
/// Change the data in the matrix to the external buffer X (m x n,
/// column-major); the previous data is released, X is not copied
/// and will not be freed by the destructor
template <typename T> inline void Matrix<T>::setData(T* X, INTM m, INTM n) {
clear();
_X=X;
_m=m;
_n=n;
_externAlloc=true;
};
/// Set all the values to zero (memset; assumes T is trivially copyable,
/// which holds for the numeric types this class is used with)
template <typename T> inline void Matrix<T>::setZeros() {
memset(_X,0,_n*_m*sizeof(T));
};
/// Fill every entry of the matrix with the scalar a.
template <typename T> inline void Matrix<T>::set(const T a) {
   const INTM total = _n*_m;
   for (INTM k = 0; k<total; ++k)
      _X[k]=a;
};
/// Release storage (only if this object owns it) and reset to an empty
/// 0x0 matrix referencing no memory.
template <typename T> inline void Matrix<T>::clear() {
   if (!_externAlloc) delete[](_X);
   _n=0;
   _m=0;
   _X=NULL;
   _externAlloc=true;
};
/// Fill the matrix with i.i.d. draws from normalDistrib<T>()
/// (white Gaussian noise).
template <typename T> inline void Matrix<T>::setAleat() {
   for (INTM i = 0; i<_n*_m; ++i) _X[i]=normalDistrib<T>();
};
/// Set the matrix to the identity (ones on the main diagonal, zeros elsewhere).
template <typename T> inline void Matrix<T>::eye() {
   this->setZeros();
   const INTM d = MIN(_n,_m);
   for (INTM k = 0; k<d; ++k)
      _X[k*_m+k] = T(1.0);
};
/// Normalize every column to unit l2 norm; columns with (near-)zero norm
/// are replaced by a random unit-norm vector instead of dividing by ~0.
template <typename T> inline void Matrix<T>::normalize() {
   //T constant = 1.0/sqrt(_m);
   for (INTM i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1e-10) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      } else {
         // degenerate column: re-draw it at random and normalize
         // for (INTM j = 0; j<_m; ++j) _X[_m*i+j]=constant;
         Vector<T> d;
         this->refCol(i,d);
         d.setAleat();
         d.normalize();
      }
   }
};
/// Scale down to unit l2 norm every column whose norm exceeds one;
/// columns with norm <= 1 are left untouched (projection onto the l2 ball).
template <typename T> inline void Matrix<T>::normalize2() {
   for (INTM i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1.0) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      }
   }
};
/// Center each column: subtract its mean so every column sums to zero.
template <typename T> inline void Matrix<T>::center() {
   for (INTM i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum();
      // subtract the per-column mean (sum/_m) from every entry
      col.add(-sum/static_cast<T>(_m));
   }
};
/// Center each row: subtract the mean of every row (averaged over the
/// _n columns) from all of its entries.
template <typename T> inline void Matrix<T>::center_rows() {
   Vector<T> row_means(_m);
   row_means.setZeros();
   for (INTM c = 0; c<_n; ++c)
      for (INTM r = 0; r<_m; ++r)
         row_means[r] += _X[c*_m+r];
   row_means.scal(T(1.0)/_n);
   for (INTM c = 0; c<_n; ++c)
      for (INTM r = 0; r<_m; ++r)
         _X[c*_m+r] -= row_means[r];
};
/// Normalize each row to unit l2 norm (rows with near-zero norm are left
/// unchanged).
/// BUGFIX: the previous version squared the accumulated squared norms
/// (sqr()) instead of taking 1/sqrt, and then called multDiagRight with a
/// length-_m vector — which scales *columns* and silently no-ops unless
/// _m == _n. Rows were never actually normalized.
template <typename T> inline void Matrix<T>::normalize_rows() {
   Vector<T> norm_rows(_m);
   norm_rows.setZeros();
   // accumulate squared l2 norm of each row
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         norm_rows[j] += _X[i*_m+j]*_X[i*_m+j];
   // convert to inverse norms; keep degenerate rows untouched
   for (INTM j = 0; j<_m; ++j)
      norm_rows[j] = norm_rows[j] > T(1e-20) ? T(1.0)/sqrt(norm_rows[j]) : T(1.0);
   // scale row j by norm_rows[j]
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         _X[i*_m+j] *= norm_rows[j];
};
/// Center each column and record the subtracted per-column means in
/// `centers` (resized to _n).
template <typename T> inline void Matrix<T>::center(Vector<T>& centers) {
   centers.resize(_n);
   for (INTM i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum()/static_cast<T>(_m);
      centers[i]=sum;
      col.add(-sum);
   }
};
/// Scale every entry of the matrix by a (A <- a*A).
template <typename T> inline void Matrix<T>::scal(const T a) {
   cblas_scal<T>(_n*_m,a,_X,1);
};
/// Deep-copy `mat` into this matrix (resizing as needed).
/// Self-copy (aliasing data pointers) is a no-op.
template <typename T> inline void Matrix<T>::copy(const Matrix<T>& mat) {
   if (_X != mat._X) {
      resize(mat._m,mat._n);
      // cblas_copy<T>(_m*_n,mat._X,1,_X,1);
      memcpy(_X,mat._X,_m*_n*sizeof(T));
   }
};
/// Make this matrix a non-owning *reference* to mat's storage
/// (no data is copied; mat must outlive this object).
template <typename T> inline void Matrix<T>::copyRef(const Matrix<T>& mat) {
   this->setData(mat.rawX(),mat.m(),mat.n());
};
/// Make the matrix symmetric by mirroring the lower-left triangle
/// into the upper-right triangle.
template <typename T> inline void Matrix<T>::fillSymmetric() {
   for (INTM col = 1; col<_n; ++col)
      for (INTM row = 0; row<col; ++row)
         _X[row*_m+col]=_X[col*_m+row];
};
/// Make the matrix symmetric by mirroring the upper-right triangle
/// into the lower-left triangle (the opposite of fillSymmetric).
template <typename T> inline void Matrix<T>::fillSymmetric2() {
   for (INTM col = 1; col<_n; ++col)
      for (INTM row = 0; row<col; ++row)
         _X[col*_m+row]=_X[row*_m+col];
};
/// Per-column whitening: each column is viewed as V consecutive patches of
/// size _m/V; subtract each patch's own mean from its entries.
template <typename T> inline void Matrix<T>::whiten(const INTM V) {
   const INTM sizePatch=_m/V;
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         T mean = 0;
         for (INTM k = 0; k<sizePatch; ++k) {
            mean+=_X[i*_m+sizePatch*j+k];
         }
         mean /= sizePatch;
         for (INTM k = 0; k<sizePatch; ++k) {
            _X[i*_m+sizePatch*j+k]-=mean;
         }
      }
   }
};
/// Whiten the matrix, returning the subtracted means in `mean`.
/// If pattern: columns are treated as sqrt(_m) x sqrt(_m) image patches and
/// four means are computed on a 2x2 checkerboard of pixel parities
/// (mean must have size >= 4). Otherwise: mean.n() groups of size
/// _m/mean.n() are de-meaned globally across all columns.
template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean, const bool pattern) {
   mean.setZeros();
   if (pattern) {
      const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_m)));
      INTM count[4];
      for (INTM i = 0; i<4; ++i) count[i]=0;
      // first pass: accumulate sums and counts per parity class (2*ox+oy)
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               mean[2*offsetx+offsety]+=_X[i*_m+j*n+k];
               count[2*offsetx+offsety]++;
            }
         }
      }
      for (INTM i = 0; i<4; ++i)
         mean[i] /= count[i];
      // second pass: subtract the class mean from each pixel
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               _X[i*_m+j*n+k]-=mean[2*offsetx+offsety];
            }
         }
      }
   } else {
      const INTM V = mean.n();
      const INTM sizePatch=_m/V;
      // accumulate per-group sums over all columns
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               mean[j]+=_X[i*_m+sizePatch*j+k];
            }
         }
      }
      mean.scal(T(1.0)/(_n*sizePatch));
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               _X[i*_m+sizePatch*j+k]-=mean[j];
            }
         }
      }
   }
};
/// Masked whitening: compute per-group means over all columns, normalize by
/// the number of active (mask != 0) entries in each group, then subtract the
/// group mean from active entries only. mean.n() defines the group count.
template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean, const
      Vector<T>& mask) {
   const INTM V = mean.n();
   const INTM sizePatch=_m/V;
   mean.setZeros();
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         for (INTM k = 0; k<sizePatch; ++k) {
            mean[j]+=_X[i*_m+sizePatch*j+k];
         }
      }
   }
   // divide by (#columns * #active mask entries in group i);
   // asum counts active entries assuming the mask is 0/1-valued
   for (INTM i = 0; i<V; ++i)
      mean[i] /= _n*cblas_asum(sizePatch,mask._X+i*sizePatch,1);
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         for (INTM k = 0; k<sizePatch; ++k) {
            if (mask[sizePatch*j+k])
               _X[i*_m+sizePatch*j+k]-=mean[j];
         }
      }
   }
};
/// Undo whiten(mean, pattern): add the stored means back.
/// The pattern flag must match the one used when whitening.
template <typename T> inline void Matrix<T>::unwhiten(Vector<T>& mean, const bool pattern) {
   if (pattern) {
      const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_m)));
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               // same checkerboard class index (2*ox+oy) as in whiten()
               _X[i*_m+j*n+k]+=mean[2*offsetx+offsety];
            }
         }
      }
   } else {
      const INTM V = mean.n();
      const INTM sizePatch=_m/V;
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               _X[i*_m+sizePatch*j+k]+=mean[j];
            }
         }
      }
   }
};
/// Write the transpose of this matrix into `trans` (resized to _n x _m).
template <typename T> inline void Matrix<T>::transpose(Matrix<T>& trans) const {
   trans.resize(_n,_m);
   T* dst = trans._X;
   for (INTM col = 0; col<_n; ++col)
      for (INTM row = 0; row<_m; ++row)
         dst[row*_n+col] = _X[col*_m+row];
};
/// Negate the matrix in place: A <- -A.
template <typename T> inline void Matrix<T>::neg() {
   const INTM total = _n*_m;
   for (INTM k = 0; k<total; ++k)
      _X[k] = -_X[k];
};
/// Add one to every entry of the main diagonal (A <- A + I).
template <typename T> inline void Matrix<T>::incrDiag() {
   const INTM d = MIN(_n,_m);
   for (INTM k = 0; k<d; ++k)
      ++_X[k*_m+k];
};
/// Add the vector `diag` entrywise to the main diagonal
/// (diag must have at least MIN(_n,_m) entries).
template <typename T> inline void Matrix<T>::addDiag(
      const Vector<T>& diag) {
   T* d= diag.rawX();
   for (INTM i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] += d[i];
};
/// Add the scalar `diag` to every entry of the main diagonal.
template <typename T> inline void Matrix<T>::addDiag(
      const T diag) {
   const INTM d = MIN(_n,_m);
   for (INTM k = 0; k<d; ++k)
      _X[k*_m+k] += diag;
};
/// Add the scalar cent[i] to every entry of column i
/// (cent must have _n entries).
template <typename T> inline void Matrix<T>::addToCols(
      const Vector<T>& cent) {
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      this->refCol(i,col);
      col.add(cent[i]);
   }
};
/// Add a*vec to every column of the matrix (vec must have _m entries).
template <typename T> inline void Matrix<T>::addVecToCols(
      const Vector<T>& vec, const T a) {
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      this->refCol(i,col);
      col.add(vec,a);
   }
};
/// Rank-one approximation A ~ u*v' via the power method.
/// u0 is an initial guess for u (a random unit vector is used if u0 is
/// ~zero). Converges when successive left vectors align within eps,
/// after at least 10 iterations.
template <typename T> inline void Matrix<T>::svdRankOne(const Vector<T>& u0,
      Vector<T>& u, Vector<T>& v) const {
   int i;
   const int max_iter=MAX(_m,MAX(_n,200));
   const T eps=1e-10;
   u.resize(_m);
   v.resize(_n);
   T norm=u0.nrm2();
   Vector<T> up(u0);
   if (norm < EPSILON) up.setAleat();
   up.normalize();
   multTrans(up,v);
   for (i = 0; i<max_iter; ++i) {
      mult(v,u);
      norm=u.nrm2();
      u.scal(1.0/norm);
      multTrans(u,v);
      // theta = cosine between current and previous left vector
      T theta=u.dot(up);
      if (i > 10 && (1 - fabs(theta)) < eps) break;
      up.copy(u);
   }
};
/// Truncated left SVD: fills U (_m x num_eig) and singular values S.
/// method 0: LAPACK gesvd on a copy; method 1: syev on A*A' then sqrt of
/// eigenvalues; method 2: syevr on A*A'. num limits the number of singular
/// vectors for method 2 only (num == -1 keeps all). For methods 1/2 the
/// eigen results come out in ascending order and are reversed at the end.
template <typename T> inline void Matrix<T>::svd2(Matrix<T>& U, Vector<T>& S, const int num, const int method) const {
   const INTM num_eig= (num == -1 || method <= 1) ? MIN(_m,_n) : MIN(MIN(_m,num),_n);
   S.resize(num_eig);
   U.resize(_m,num_eig);
   if (method==0) {
      // gesv
      T* vv = NULL;
      Matrix<T> copyX;
      copyX.copy(*this); // gesvd destroys its input, so work on a copy
      gesvd<T>(reduced,no,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,vv,1);
   } else if (method==1) {
      // syev
      if (_m == num_eig) {
         this->XXt(U);
         syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX());
      } else {
         Matrix<T> XXt(_m,_m);
         this->XXt(XXt); // in fact should do XtX, but will do that later
         Vector<T> ss(_m);
         syev<T>(allV,lower,_m,XXt.rawX(),_m,ss.rawX());
         // keep the num_eig largest eigenpairs (syev returns ascending order)
         memcpy(U.rawX(),XXt.rawX()+(_m-num_eig)*_m,_m*num_eig*sizeof(T));
         memcpy(S.rawX(),ss.rawX()+_m-num_eig,num_eig*sizeof(T));
      }
      S.thrsPos();
      S.Sqrt(); // eigenvalues of A*A' -> singular values of A
   } else if (method==2) {
      // syevr
      Matrix<T> XXt(_m,_m);
      this->XXt(XXt); // in fact should do XtX, but will do that later
      if (_m == num_eig) {
         syevr(allV,rangeAll,lower,_m,XXt.rawX(),_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
      } else {
         Vector<T> ss(_m);
         syevr(allV,range,lower,_m,XXt.rawX(),_m,T(0),T(0),_m-num_eig+1,_m,ss.rawX(),U.rawX(),_m);
         memcpy(S.rawX(),ss.rawX(),num_eig*sizeof(T));
      }
      S.thrsPos();
      for (int ii=0; ii<S.n(); ++ii)
         S[ii]=alt_sqrt<T>(S[ii]);
      //S.Sqrt();
   }
   if (method==1 || method==2) {
      // reverse S and the columns of U so values come out descending
      Vector<T> col, col2;
      Vector<T> tmpcol(_m);
      const int n=U.n();
      for (int ii=0; ii<n/2; ++ii) {
         T tmp=S[n-ii-1];
         S[n-ii-1]=S[ii];
         S[ii]=tmp;
         U.refCol(n-ii-1,col);
         U.refCol(ii,col2);
         tmpcol.copy(col);
         col.copy(col2);
         col2.copy(tmpcol);
      }
   }
}
/// Full symmetric eigendecomposition via LAPACK syevr: U receives the
/// eigenvectors, S the eigenvalues (negative ones clamped to zero).
/// NOTE(review): _X is passed directly to syevr even though the method is
/// const — LAPACK drivers typically overwrite their input matrix; confirm
/// that the syevr wrapper copies, otherwise *this may be clobbered.
template <typename T> inline void Matrix<T>::SymEig(Matrix<T>& U, Vector<T>& S) const {
   const int num_eig=_m;
   S.resize(_m);
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
}
/// Compute out = (A + lambda*I)^{-1/2} for a symmetric PSD matrix A, via
/// eigendecomposition: eigenvalues below 1e-6 are zeroed (pseudo-inverse
/// behavior), others mapped to 1/sqrt(s + lambda).
template <typename T> inline void Matrix<T>::InvsqrtMat(Matrix<T>& out, const T lambda) const {
   const int num_eig=_m;
   Vector<T> S;
   S.resize(_m);
   Matrix<T> U, U2;
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
   //for (int ii=0; ii<_m; ++ii) S[ii]=sqrt(S[ii])/(S[ii]+lambda);
   //for (int ii=0; ii<_m; ++ii) S[ii]= S[ii] > 1e-6 ? T(1.0)/S[ii] : 0;
   for (int ii=0; ii<_m; ++ii) S[ii]= S[ii] > 1e-6 ? T(1.0)/sqrt(S[ii]+lambda) : 0;
   // out = U * diag(S) * U'
   U2.copy(U);
   U2.multDiagRight(S);
   U2.mult(U,out,false,true);
}
/// Compute out = A^{1/2} for a symmetric PSD matrix A via
/// eigendecomposition: out = U * diag(sqrt(max(S,0))) * U'.
template <typename T> inline void Matrix<T>::sqrtMat(Matrix<T>& out) const {
   const int num_eig=_m;
   Vector<T> S;
   S.resize(_m);
   Matrix<T> U, U2;
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
   S.Sqrt();
   U2.copy(U);
   U2.multDiagRight(S);
   U2.mult(U,out,false,true);
}
/// Compute all singular values of the matrix into u (size MIN(_m,_n)).
/// For strongly rectangular matrices the smaller Gram matrix is
/// eigendecomposed instead of running a full gesvd (cheaper).
template <typename T> inline void Matrix<T>::singularValues(Vector<T>& u) const {
   u.resize(MIN(_m,_n));
   if (_m > 10*_n) {
      // tall: eigenvalues of A'A (n x n), then sqrt
      Matrix<T> XtX;
      this->XtX(XtX);
      syev<T>(no,lower,_n,XtX.rawX(),_n,u.rawX());
      u.thrsPos();
      u.Sqrt();
   } else if (_n > 10*_m) {
      // wide: eigenvalues of AA' (m x m), then sqrt
      Matrix<T> XXt;
      this->XXt(XXt);
      syev<T>(no,lower,_m,XXt.rawX(),_m,u.rawX());
      u.thrsPos();
      u.Sqrt();
   } else {
      T* vu = NULL;
      T* vv = NULL;
      Matrix<T> copyX;
      copyX.copy(*this); // gesvd destroys its input
      gesvd<T>(no,no,_m,_n,copyX._X,_m,u.rawX(),vu,1,vv,1);
   }
};
/// Thin SVD: A = U * diag(S) * V with U (_m x k), S (k), V (k x _n),
/// k = MIN(_m,_n). For strongly rectangular matrices the smaller Gram
/// matrix is eigendecomposed and the other factor recovered by
/// multiplication and rescaling; otherwise LAPACK gesvd is used.
template <typename T> inline void Matrix<T>::svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const {
   const INTM num_eig=MIN(_m,_n);
   S.resize(num_eig);
   U.resize(_m,num_eig);
   V.resize(num_eig,_n);
   if (_m > 10*_n) {
      // tall: eigendecompose A'A to get V and S, then U = A*V*diag(1/S)
      Matrix<T> Vt(_n,_n);
      this->XtX(Vt);
      syev<T>(allV,lower,_n,Vt.rawX(),_n,S.rawX());
      S.thrsPos();
      S.Sqrt();
      this->mult(Vt,U);
      Vt.transpose(V);
      Vector<T> inveigs;
      inveigs.copy(S);
      for (INTM i = 0; i<num_eig; ++i)
         if (S[i] > 1e-10) {
            inveigs[i]=T(1.0)/S[i];
         } else {
            inveigs[i]=T(1.0); // keep degenerate directions unscaled
         }
      U.multDiagRight(inveigs);
   } else if (_n > 10*_m) {
      // wide: eigendecompose AA' to get U and S, then V = diag(1/S)*U'*A
      this->XXt(U);
      syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX());
      S.thrsPos();
      S.Sqrt();
      U.mult(*this,V,true,false);
      Vector<T> inveigs;
      inveigs.copy(S);
      for (INTM i = 0; i<num_eig; ++i)
         if (S[i] > 1e-10) {
            inveigs[i]=T(1.0)/S[i];
         } else {
            inveigs[i]=T(1.0);
         }
      V.multDiagLeft(inveigs);
   } else {
      Matrix<T> copyX;
      copyX.copy(*this); // gesvd destroys its input
      gesvd<T>(reduced,reduced,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,V.rawX(),num_eig);
   }
};
/// Approximate the eigenvector of the largest eigenvalue of a symmetric
/// matrix with (up to) two rounds of the power method, starting from u0.
/// The second round (after shifting the diagonal by -lambda) handles the
/// case where the dominant eigenvalue by magnitude is negative.
/// NOTE(review): the second round mutates the diagonal of _X and does not
/// restore it — callers presumably tolerate this; confirm.
template <typename T> inline void Matrix<T>::eigLargestSymApprox(
      const Vector<T>& u0, Vector<T>& u) const {
   int i,j;
   const int max_iter=100;
   const T eps=10e-6;
   u.copy(u0);
   T norm = u.nrm2();
   T theta;
   u.scal(1.0/norm);
   Vector<T> up(u);
   Vector<T> uor(u);
   T lambda=T();
   for (j = 0; j<2;++j) {
      up.copy(u);
      for (i = 0; i<max_iter; ++i) {
         mult(up,u);
         norm = u.nrm2();
         u.scal(1.0/norm);
         theta=u.dot(up);
         // converged when consecutive iterates are (anti-)aligned
         if ((1 - fabs(theta)) < eps) break;
         up.copy(u);
      }
      lambda+=theta*norm;
      if (isnan(lambda)) {
         std::cerr << "eigLargestSymApprox failed" << std::endl;
         exit(1);
      }
      if (j == 1 && lambda < eps) {
         u.copy(uor);
         break;
      }
      // theta >= 0 means the dominant eigenvalue is positive: done
      if (theta >= 0) break;
      u.copy(uor);
      // shift the spectrum by -lambda and retry
      for (i = 0; i<_m; ++i) _X[i*_m+i]-=lambda;
   }
};
/// Power method on a symmetric matrix starting from u0: on return u holds
/// the eigenvector of the eigenvalue with largest magnitude, and that
/// magnitude is returned.
template <typename T> inline T Matrix<T>::eigLargestMagnSym(
      const Vector<T>& u0, Vector<T>& u) const {
   const int max_iter=1000;
   const T eps=10e-6;
   u.copy(u0);
   T norm = u.nrm2();
   u.scal(1.0/norm);
   Vector<T> up(u);
   T lambda=T();
   for (int i = 0; i<max_iter; ++i) {
      mult(u,up);
      u.copy(up);
      norm=u.nrm2();
      if (norm > 0) u.scal(1.0/norm);
      // stop when the Rayleigh-quotient estimate stabilizes (or A*u == 0)
      if (norm == 0 || fabs(norm-lambda)/norm < eps) break;
      lambda=norm;
   }
   return norm;
};
/// Power method with a random start vector: return the magnitude of the
/// eigenvalue of largest magnitude of a symmetric matrix.
template <typename T> inline T Matrix<T>::eigLargestMagnSym() const {
   const int max_iter=1000;
   const T eps=10e-6;
   Vector<T> u(_m);
   u.setAleat();
   T norm = u.nrm2();
   u.scal(1.0/norm);
   Vector<T> up(u);
   T lambda=T();
   for (int i = 0; i<max_iter; ++i) {
      mult(u,up);
      u.copy(up);
      norm=u.nrm2();
      if (fabs(norm-lambda) < eps) break;
      lambda=norm;
      u.scal(1.0/norm);
   }
   return norm;
};
/// In-place inverse of a symmetric (indefinite) matrix via LAPACK sytri
/// on the upper triangle, then mirror to restore full symmetry.
template <typename T> inline void Matrix<T>::invSym() {
   sytri<T>(upper,_n,_X,_n);
   this->fillSymmetric();
};
/// In-place inverse of a symmetric positive-definite matrix via LAPACK
/// potri (Cholesky-based) on the upper triangle, then mirror.
template <typename T> inline void Matrix<T>::invSymPos() {
   potri<T>(upper,_n,_X,_n);
   this->fillSymmetric();
};
/// b <- a*A'*x + c*b (dense GEMV with A transposed); b is resized to _n.
template <typename T> inline void Matrix<T>::multTrans(const Vector<T>& x,
      Vector<T>& b, const T a, const T c) const {
   b.resize(_n);
   // assert(x._n == _m && b._n == _n);
   cblas_gemv<T>(CblasColMajor,CblasTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1);
};
/// b <- alpha*A'*x + beta*b, where x is sparse; b is resized to _n.
/// BUGFIX: the two branches were swapped — when beta was nonzero the old
/// code overwrote b (dropping the beta*b term), and when beta was zero it
/// pointlessly multiplied b by zero. Now beta != 0 accumulates and
/// beta == 0 overwrites.
template <typename T>
template <typename I>
inline void Matrix<T>::multTrans(const SpVector<T,I>& x,
      Vector<T>& b, const T alpha, const T beta) const {
   b.resize(_n);
   Vector<T> col;
   if (beta) {
      for (INTM i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = beta*b._X[i]+alpha*col.dot(x);
      }
   } else {
      for (INTM i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = alpha*col.dot(x);
      }
   }
};
/// Partial b = A'*x restricted to active columns: b[i] = <col_i, x> when
/// active[i], and 0 otherwise.
template <typename T> inline void Matrix<T>::multTrans(
      const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const {
   b.setZeros();
   Vector<T> col;
   bool* pr_active=active.rawX();
   for (INTM i = 0; i<_n; ++i) {
      if (pr_active[i]) {
         this->refCol(i,col);
         b._X[i]=col.dot(x);
      }
   }
};
/// b <- a*A*x + c*b (dense GEMV); b is resized to _m.
template <typename T> inline void Matrix<T>::mult(const Vector<T>& x,
      Vector<T>& b, const T a, const T c) const {
   //  assert(x._n == _n && b._n == _m);
   b.resize(_m);
   cblas_gemv<T>(CblasColMajor,CblasNoTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1);
};
/// b <- A*x computed row by row with dot products (no GEMV);
/// b is resized to _m. Note the stride _m to walk a row of the
/// column-major storage.
template <typename T> inline void Matrix<T>::mult_loop(const Vector<T>& x,
      Vector<T>& b) const {
   b.resize(_m);
   for (int ii=0; ii<_m; ++ii) {
      b[ii]=cblas_dot<T>(_n,x._X,1,_X+ii,_m);
   }
};
/// b <- a*A*x + a2*b, where x is sparse: scale b by a2 (or zero it), then
/// axpy each column of A selected by x's support. b must already be
/// sized to _m.
template <typename T>
template <typename I>
inline void Matrix<T>::mult(const SpVector<T,I>& x,
      Vector<T>& b, const T a, const T a2) const {
   if (!a2) {
      b.setZeros();
   } else if (a2 != 1.0) {
      b.scal(a2);
   }
   if (a == 1.0) {
      // fast path avoids a multiply per column
      for (INTM i = 0; i<x._L; ++i) {
         cblas_axpy<T>(_m,x._v[i],_X+x._r[i]*_m,1,b._X,1);
      }
   } else {
      for (INTM i = 0; i<x._L; ++i) {
         cblas_axpy<T>(_m,a*x._v[i],_X+x._r[i]*_m,1,b._X,1);
      }
   }
};
/// C <- a*op(A)*op(B) + b*C via GEMM, where op is optional transposition
/// of either operand; C is resized to the product shape.
template <typename T> inline void Matrix<T>::mult(const Matrix<T>& B,
      Matrix<T>& C, const bool transA, const bool transB,
      const T a, const T b) const {
   CBLAS_TRANSPOSE trA,trB;
   INTM m,k,n;
   if (transA) {
      trA = CblasTrans;
      m = _n;
      k = _m;
   } else {
      trA= CblasNoTrans;
      m = _m;
      k = _n;
   }
   if (transB) {
      trB = CblasTrans;
      n = B._m;
      //assert(B._n == k);
   } else {
      trB = CblasNoTrans;
      n = B._n;
      //assert(B._m == k);
   }
   C.resize(m,n);
   cblas_gemm<T>(CblasColMajor,trA,trB,m,n,k,a,_X,_m,B._X,B._m,
         b,C._X,C._m);
};
/// C <- a*op(B)*op(A) + b*C: same as mult but with the operand order
/// swapped (delegates to B.mult with the transpose flags exchanged).
template <typename T>
inline void Matrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   B.mult(*this,C,transB,transA,a,b);
};
/// C <- a*op(A)*op(B) + b*C where B is sparse. Each of the four
/// transpose combinations uses a different sparse kernel: row updates,
/// sparse dot products, or rank-one updates.
template <typename T>
template <typename I>
inline void Matrix<T>::mult(const SpMatrix<T,I>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   if (transA) {
      if (transB) {
         // C = a*A'*B' + b*C, built row by row of C
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> rowC(B.m());
         Vector<T> colA;
         for (INTM i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.mult(colA,rowC,a);
            C.addRow(i,rowC,a);
         }
      } else {
         // C = a*A'*B + b*C via sparse dot products per column of B
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colC;
         SpVector<T,I> colB;
         for (INTM i = 0; i<B.n(); ++i) {
            C.refCol(i,colC);
            B.refCol(i,colB);
            this->multTrans(colB,colC,a,T(1.0));
         }
      }
   } else {
      if (transB) {
         // C = a*A*B' + b*C via rank-one updates colA * colB'
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colA;
         SpVector<T,I> colB;
         for (INTM i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         // C = a*A*B + b*C: each column of C is A times a sparse column of B
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colC;
         SpVector<T,I> colB;
         for (INTM i = 0; i<B.n(); ++i) {
            C.refCol(i,colC);
            B.refCol(i,colB);
            this->mult(colB,colC,a,T(1.0));
         }
      }
   }
};
/// A <- diag(d)*A: scale row r of the matrix by d[r].
/// Silently does nothing if diag has the wrong length.
template <typename T>
inline void Matrix<T>::multDiagLeft(const Vector<T>& diag) {
   if (diag.n() != _m)
      return;
   T* scale = diag.rawX();
   for (INTM col = 0; col< _n; ++col) {
      T* colptr = _X+col*_m;
      for (INTM row = 0; row<_m; ++row)
         colptr[row] *= scale[row];
   }
};
/// A <- A*diag(d): scale column c of the matrix by d[c].
/// Silently does nothing if diag has the wrong length.
template <typename T> inline void Matrix<T>::multDiagRight(
      const Vector<T>& diag) {
   if (diag.n() != _n)
      return;
   T* scale = diag.rawX();
   for (INTM col = 0; col< _n; ++col) {
      const T factor = scale[col];
      T* colptr = _X+col*_m;
      for (INTM row = 0; row<_m; ++row)
         colptr[row] *= factor;
   }
};
/// mat <- mat + A*diag(d): accumulate each column of A scaled by d[i]
/// into the corresponding column of mat (resized to _m x _n; existing
/// contents are kept and added to).
template <typename T> inline void Matrix<T>::AddMultDiagRight(
      const Vector<T>& diag, Matrix<T>& mat) {
   if (diag.n() != _n)
      return;
   mat.resize(_m,_n);
   //mat.setZeros();
   T* d = diag.rawX();
   for (INTM i = 0; i< _n; ++i) {
      cblas_axpy<T>(_m,d[i],_X+i*_m,1,mat._X+i*_m,1);
   }
};
/// C = A .* B, elementwise multiplication (Hadamard product).
template <typename T> inline void Matrix<T>::mult_elementWise(
      const Matrix<T>& B, Matrix<T>& C) const {
   assert(_n == B._n && _m == B._m);
   C.resize(_m,_n);
   vMul<T>(_n*_m,_X,B._X,C._X);
};
/// C = A ./ B, elementwise division.
template <typename T> inline void Matrix<T>::div_elementWise(
      const Matrix<T>& B, Matrix<T>& C) const {
   assert(_n == B._n && _m == B._m);
   C.resize(_m,_n);
   vDiv<T>(_n*_m,_X,B._X,C._X);
};
/// xtx <- A'*A (n x n Gram matrix) via SYRK on the upper triangle,
/// then mirrored to a full symmetric matrix.
template <typename T> inline void Matrix<T>::XtX(Matrix<T>& xtx) const {
   xtx.resize(_n,_n);
   cblas_syrk<T>(CblasColMajor,CblasUpper,CblasTrans,_n,_m,T(1.0),
         _X,_m,T(),xtx._X,_n);
   xtx.fillSymmetric();
};
/// xxt <- A*A' (m x m Gram matrix) via SYRK on the upper triangle,
/// then mirrored to a full symmetric matrix.
template <typename T> inline void Matrix<T>::XXt(Matrix<T>& xxt) const {
   xxt.resize(_m,_m);
   cblas_syrk<T>(CblasColMajor,CblasUpper,CblasNoTrans,_m,_n,T(1.0),
         _X,_m,T(),xxt._X,_m);
   xxt.fillSymmetric();
};
/// XXt <- A*A' for the leading L columns of an upper triangular A,
/// accumulated as a sum of symmetric rank-one updates (column i
/// contributes only its first i+1 entries).
template <typename T> inline void Matrix<T>::upperTriXXt(Matrix<T>& XXt, const INTM L) const {
   XXt.resize(L,L);
   for (INTM i = 0; i<L; ++i) {
      cblas_syr<T>(CblasColMajor,CblasUpper,i+1,T(1.0),_X+i*_m,1,XXt._X,L);
   }
   XXt.fillSymmetric();
}
/// Copy the main diagonal into dv (resized to MIN(_n,_m)).
template <typename T> inline void Matrix<T>::diag(Vector<T>& dv) const {
   const INTM len = MIN(_n,_m);
   dv.resize(len);
   T* const out = dv.rawX();
   for (INTM k = 0; k<len; ++k)
      out[k]=_X[k*_m+k];
};
/// Overwrite the main diagonal with the entries of dv.
template <typename T> inline void Matrix<T>::setDiag(const Vector<T>& dv) {
   const INTM len = MIN(_n,_m);
   T* const src = dv.rawX();
   for (INTM k = 0; k<len; ++k)
      _X[k*_m+k]=src[k];
};
/// Set every entry of the main diagonal to val.
template <typename T> inline void Matrix<T>::setDiag(const T val) {
   const INTM len = MIN(_n,_m);
   for (INTM k = 0; k<len; ++k)
      _X[k*_m+k]=val;
};
/// Replace each entry by its exponential (in place, vectorized).
template <typename T> inline void Matrix<T>::exp() {
   vExp<T>(_n*_m,_X,_X);
};
/// Replace each entry x by x^a (in place, vectorized).
template <typename T> inline void Matrix<T>::pow(const T a) {
   vPowx<T>(_n*_m,_X,a,_X);
};
/// Square each entry in place.
template <typename T> inline void Matrix<T>::sqr() {
   vSqr<T>(_n*_m,_X,_X);
};
/// Replace each entry by its square root, in place.
template <typename T> inline void Matrix<T>::Sqrt() {
   vSqrt<T>(_n*_m,_X,_X);
};
/// Replace each entry x by 1/sqrt(x), in place.
template <typename T> inline void Matrix<T>::Invsqrt() {
   vInvSqrt<T>(_n*_m,_X,_X);
};
/// Return vec'*A*vec for a sparse vec: a double sum over the support,
/// reading only the entries of A indexed by the nonzeros.
template <typename T>
template <typename I>
inline T Matrix<T>::quad(const SpVector<T,I>& vec) const {
   T acc = T();
   const INTM L = vec._L;
   I* idx = vec._r;
   T* val = vec._v;
   for (INTM a = 0; a<L; ++a)
      for (INTM b = 0; b<L; ++b)
         acc += _X[idx[a]*_m+idx[b]]*val[a]*val[b];
   return acc;
};
/// Blocked quadratic forms: A is viewed as size_y horizontal blocks of
/// _n/size_y columns; y[i] <- b*y[i] + a * vec1'*A_i*vec2.
/// y's size on entry determines the number of blocks.
template <typename T>
template <typename I>
inline void Matrix<T>::quad_mult(const Vector<T>& vec1,
      const SpVector<T,I>& vec2, Vector<T>& y, const T a, const T b) const {
   const INTM size_y= y.n();
   const INTM nn = _n/size_y;
   //y.resize(size_y);
   //y.setZeros();
   Matrix<T> tmp;
   for (INTM i = 0; i<size_y; ++i) {
      // tmp is a non-owning view of block i (columns i*nn .. (i+1)*nn-1)
      tmp.setData(_X+(i*nn)*_m,_m,nn);
      y[i]=b*y[i]+a*tmp.quad(vec1,vec2);
   }
}
/// Return vec1'*A*vec for sparse vec: sum of v[i]*<col_{r[i]}, vec1>
/// over the support of vec.
template <typename T>
template <typename I>
inline T Matrix<T>::quad(
      const Vector<T>& vec1, const SpVector<T,I>& vec) const {
   T sum = T();
   INTM L = vec._L;
   I* r = vec._r;
   T* v = vec._v;
   Vector<T> col;
   for (INTM i = 0; i<L; ++i) {
      this->refCol(r[i],col);
      sum += v[i]*col.dot(vec1);
   }
   return sum;
};
/// A <- A + alpha*mat (dimensions must match).
template <typename T> inline void Matrix<T>::add(const Matrix<T>& mat, const T alpha) {
   assert(mat._m == _m && mat._n == _n);
   cblas_axpy<T>(_n*_m,alpha,mat._X,1,_X,1);
};
/// A <- alpha*mat + beta*A (dimensions must match).
template <typename T> inline void Matrix<T>::add_scal(const Matrix<T>& mat, const T alpha, const T beta) {
   assert(mat._m == _m && mat._n == _n);
   cblas_axpby<T>(_n*_m,alpha,mat._X,1,beta,_X,1);
};
/// Return the Frobenius inner product <A, mat> = sum_ij A_ij*mat_ij.
template <typename T> inline T Matrix<T>::dot(const Matrix<T>& mat) const {
   assert(mat._m == _m && mat._n == _n);
   return cblas_dot<T>(_n*_m,mat._X,1,_X,1);
};
/// Add the scalar alpha to every entry of the matrix.
template <typename T> inline void Matrix<T>::add(const T alpha) {
   const INTM total = _n*_m;
   for (INTM k = 0; k<total; ++k)
      _X[k]+=alpha;
};
/// A <- A - mat, elementwise (dimensions must match).
template <typename T> inline void Matrix<T>::sub(const Matrix<T>& mat) {
   vSub<T>(_n*_m,_X,mat._X,_X);
};
/// Return the sum of absolute values of all entries (entrywise l1 norm).
template <typename T> inline T Matrix<T>::asum() const {
   return cblas_asum<T>(_n*_m,_X,1);
};
/// Return the sum of all entries of the matrix.
template <typename T> inline T Matrix<T>::sum() const {
   T acc=0;
   const INTM total = _n*_m;
   for (INTM k =0; k<total; ++k)
      acc+=_X[k];
   return acc;
};
/// Return the trace (sum of the main diagonal, over MIN(_n,_m) entries).
template <typename T> inline T Matrix<T>::trace() const {
   T acc=T();
   const INTM len = MIN(_n,_m);
   for (INTM k = 0; k<len; ++k)
      acc += _X[k*_m+k];
   return acc;
};
/// Return the Frobenius norm ||A||_F.
template <typename T> inline T Matrix<T>::normF() const {
   return cblas_nrm2<T>(_n*_m,_X,1);
};
/// Return the mean of all entries (via a flat vector view of the data).
template <typename T> inline T Matrix<T>::mean() const {
   Vector<T> vec;
   this->toVect(vec);
   return vec.mean();
};
/// Return the mean of absolute values of all entries.
template <typename T> inline T Matrix<T>::abs_mean() const {
   Vector<T> vec;
   this->toVect(vec);
   return vec.abs_mean();
};
/// Return the squared Frobenius norm ||A||_F^2 = <A, A>.
template <typename T> inline T Matrix<T>::normFsq() const {
   return cblas_dot<T>(_n*_m,_X,1,_X,1);
};
/// Return the largest l2 norm among the columns (||A'||_{inf,2}).
template <typename T> inline T Matrix<T>::norm_inf_2_col() const {
   Vector<T> col;
   T best = -1.0;
   for (INTM c = 0; c<_n; ++c) {
      refCol(c,col);
      const T nrm = col.nrm2();
      if (nrm > best)
         best = nrm;
   }
   return best;
};
/// Return the sum of l2 norms of the columns (||A'||_{1,2}).
template <typename T> inline T Matrix<T>::norm_1_2_col() const {
   Vector<T> col;
   T sum = 0.0;
   for (INTM i = 0; i<_n; ++i) {
      refCol(i,col);
      sum += col.nrm2();
   }
   return sum;
};
/// Compute the l2 norm of each *row* into norms (resized to _m).
template <typename T> inline void Matrix<T>::norm_2_rows(
      Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (INTM c = 0; c<_n; ++c)
      for (INTM r = 0; r<_m; ++r)
         norms[r] += _X[c*_m+r]*_X[c*_m+r];
   for (INTM r = 0; r<_m; ++r)
      norms[r]=sqrt(norms[r]);
};
/// Compute the *squared* l2 norm of each row into norms (resized to _m).
template <typename T> inline void Matrix<T>::norm_2sq_rows(
      Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         norms[j] += _X[i*_m+j]*_X[i*_m+j];
};
/// Compute the l2 norm of each column into norms (resized to _n).
template <typename T> inline void Matrix<T>::norm_2_cols(
      Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.nrm2();
   }
};
/// Compute the l-infinity norm (max magnitude entry, via fmaxval) of each
/// column into norms (resized to _n).
template <typename T> inline void Matrix<T>::norm_inf_cols(Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.fmaxval();
   }
};
/// Compute the l-infinity norm of each *row* into norms (resized to _m).
template <typename T> inline void Matrix<T>::norm_inf_rows(Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         norms[j] = MAX(abs<T>(_X[i*_m+j]),norms[j]);
};
/// Compute the sum of each column's entries into sum (resized to _n).
template <typename T> inline void Matrix<T>::get_sum_cols(Vector<T>& sum) const {
   sum.resize(_n);
   for (INTM c = 0; c<_n; ++c) {
      T acc = 0;
      const T* colptr = _X+c*_m;
      for (INTM r = 0; r<_m; ++r)
         acc += colptr[r];
      sum[c]=acc;
   }
};
/// dots[i] <- <col_i(A), col_i(mat)>: columnwise inner products with a
/// same-shaped matrix; dots is resized to _n.
template <typename T> inline void Matrix<T>::dot_col(const Matrix<T>& mat,
      Vector<T>& dots) const {
   dots.resize(_n);
   for (INTM i = 0; i<_n; ++i)
      dots[i] = cblas_dot<T>(_m,_X+i*_m,1,mat._X+i*_m,1);
}
/// Compute the l1 norm of each *row* into norms (resized to _m).
template <typename T> inline void Matrix<T>::norm_l1_rows(Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         norms[j] += abs<T>(_X[i*_m+j]);
};
/// Compute the *squared* l2 norm of each column into norms (resized to _n).
template <typename T> inline void Matrix<T>::norm_2sq_cols(
      Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.nrm2sq();
   }
};
/// sum <- sum over columns of A (a length-_m vector: the row-wise totals).
template <typename T>
inline void Matrix<T>::sum_cols(Vector<T>& sum) const {
   sum.resize(_m);
   sum.setZeros();
   Vector<T> tmp;
   for (INTM i = 0; i<_n; ++i) {
      this->refCol(i,tmp);
      sum.add(tmp);
   }
};
/// mean <- average of the columns (computed as A * (1/_n) * ones).
template <typename T> inline void Matrix<T>::meanCol(Vector<T>& mean) const {
   Vector<T> ones(_n);
   ones.set(T(1.0/_n));
   this->mult(ones,mean,1.0,0.0);
};
/// mean <- average of the rows (computed as A' * (1/_m) * ones).
template <typename T> inline void Matrix<T>::meanRow(Vector<T>& mean) const {
   Vector<T> ones(_m);
   ones.set(T(1.0/_m));
   this->multTrans(ones,mean,1.0,0.0);
};
/// Broadcast `row` down the matrix: every entry of column i becomes row[i].
template <typename T> inline void Matrix<T>::fillRow(const Vector<T>& row) {
   for (INTM c = 0; c<_n; ++c) {
      const T v = row[c];
      T* colptr = _X+c*_m;
      for (INTM r = 0; r<_m; ++r)
         colptr[r]=v;
   }
};
/// Copy row j of the matrix into `row` (resized to _n).
template <typename T> inline void Matrix<T>::extractRow(const INTM j,
      Vector<T>& row) const {
   row.resize(_n);
   for (INTM c = 0; c<_n; ++c)
      row[c]=_X[c*_m+j];
};
/// Overwrite row j of the matrix with the entries of `row`.
template <typename T> inline void Matrix<T>::setRow(const INTM j,
      const Vector<T>& row) {
   for (INTM c = 0; c<_n; ++c)
      _X[c*_m+j]=row[c];
};
/// Add a*row to row j of the matrix (fast path without the multiply
/// when a == 1).
template <typename T> inline void Matrix<T>::addRow(const INTM j,
      const Vector<T>& row, const T a) {
   if (a==1.0) {
      for (INTM i = 0; i<_n; ++i) {
         _X[i*_m+j]+=row[i];
      }
   } else {
      for (INTM i = 0; i<_n; ++i) {
         _X[i*_m+j]+=a*row[i];
      }
   }
};
/// Soft-threshold every entry with threshold nu (in place, via a flat
/// vector view of the data).
template <typename T> inline void Matrix<T>::softThrshold(const T nu) {
   Vector<T> vec;
   toVect(vec);
   vec.softThrshold(nu);
};
/// Fast in-place soft-thresholding of every entry with threshold nu.
template <typename T> inline void Matrix<T>::fastSoftThrshold(const T nu) {
   Vector<T> vec;
   toVect(vec);
   vec.fastSoftThrshold(nu);
};
/// Out-of-place soft-thresholding: output <- softthresh(A, nu)
/// (output is resized to _m x _n without zero-initialization).
template <typename T> inline void Matrix<T>::fastSoftThrshold(Matrix<T>& output, const T nu) const {
   output.resize(_m,_n,false);
   Vector<T> vec, vec2;
   toVect(vec);
   output.toVect(vec2);
   vec.fastSoftThrshold(vec2,nu);
};
/// Hard-threshold every entry with threshold nu (in place).
template <typename T> inline void Matrix<T>::hardThrshold(const T nu) {
   Vector<T> vec;
   toVect(vec);
   vec.hardThrshold(nu);
};
/// Clamp every entry from below at nu (in place).
template <typename T> inline void Matrix<T>::thrsmax(const T nu) {
   Vector<T> vec;
   toVect(vec);
   vec.thrsmax(nu);
};
/// Clamp every entry from above at nu (in place).
template <typename T> inline void Matrix<T>::thrsmin(const T nu) {
   Vector<T> vec;
   toVect(vec);
   vec.thrsmin(nu);
};
/// Replace every entry x by 1/x (elementwise inverse, in place).
template <typename T> inline void Matrix<T>::inv_elem() {
   Vector<T> vec;
   toVect(vec);
   vec.inv();
};
/// Group soft-thresholding: within each column, consecutive groups of
/// sizeGroup entries are shrunk jointly by their l2 norm; leftover
/// entries (when _m is not a multiple of sizeGroup) are soft-thresholded
/// individually.
/// BUGFIX: the tail loop used to rewind j by sizeGroup (re-processing the
/// last full group) and indexed _X[j] without the i*_m column offset, so
/// it always touched the first column's data.
template <typename T> inline void Matrix<T>::blockThrshold(const T nu,
      const INTM sizeGroup) {
   for (INTM i = 0; i<_n; ++i) {
      INTM j;
      for (j = 0; j<_m-sizeGroup+1; j+=sizeGroup) {
         T nrm=0;
         for (INTM k = 0; k<sizeGroup; ++k)
            nrm += _X[i*_m +j+k]*_X[i*_m +j+k];
         nrm=sqrt(nrm);
         if (nrm < nu) {
            // whole group below the threshold: zero it
            for (INTM k = 0; k<sizeGroup; ++k)
               _X[i*_m +j+k]=0;
         } else {
            // shrink the group norm by nu
            T scal = (nrm-nu)/nrm;
            for (INTM k = 0; k<sizeGroup; ++k)
               _X[i*_m +j+k]*=scal;
         }
      }
      // leftover entries of this column that did not form a full group
      for ( ; j<_m; ++j)
         _X[i*_m+j]=softThrs<T>(_X[i*_m+j],nu);
   }
}
/// Columnwise sparse projection: each column of *this is projected
/// (per `mode`/lambda parameters) into the corresponding column of Y,
/// parallelized over columns with one scratch vector per OpenMP thread.
template <typename T> inline void Matrix<T>::sparseProject(Matrix<T>& Y,
      const T thrs,   const int mode, const T lambda1,
      const T lambda2, const T lambda3, const bool pos,
      const int numThreads) {
   int NUM_THREADS=init_omp(numThreads);
   // one _m-sized scratch buffer per thread to avoid races
   Vector<T>* XXT= new Vector<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      XXT[i].resize(_m);
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< _n; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      this->refCol(i,Xi);
      Vector<T> Yi;
      Y.refCol(i,Yi);
      Vector<T>& XX = XXT[numT];
      XX.copy(Xi);
      XX.sparseProject(Yi,thrs,mode,lambda1,lambda2,lambda3,pos);
   }
   delete[](XXT);
};
/// Keep only the positive part of every entry (negative entries -> 0).
template <typename T> inline void Matrix<T>::thrsPos() {
   Vector<T> vec;
   toVect(vec);
   vec.thrsPos();
};
/// Dense rank-one update A <- A + alpha*vec1*vec2' (BLAS GER).
template <typename T> inline void Matrix<T>::rank1Update(
      const Vector<T>& vec1, const Vector<T>& vec2, const T alpha) {
   cblas_ger<T>(CblasColMajor,_m,_n,alpha,vec1._X,1,vec2._X,1,_X,_m);
};
/// Rank-one update A <- A + alpha*vec1*vec2' where vec1 is sparse:
/// only the rows in vec1's support are touched; fast path for alpha == 1.
template <typename T>
template <typename I>
inline void Matrix<T>::rank1Update(
      const SpVector<T,I>& vec1, const Vector<T>& vec2, const T alpha) {
   I* r = vec1._r;
   T* v = vec1._v;
   T* X2 = vec2._X;
   assert(vec2._n == _n);
   if (alpha == 1.0) {
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<vec1._L; ++j) {
            _X[i*_m+r[j]] += v[j]*X2[i];
         }
      }
   } else {
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<vec1._L; ++j) {
            _X[i*_m+r[j]] += alpha*v[j]*X2[i];
         }
      }
   }
};
/// Blocked rank-one updates: A is viewed as vec1b.n() horizontal blocks of
/// _n/vec1b.n() columns; block i receives the update
/// alpha*vec1b[i]*vec1*vec2'.
template <typename T>
template <typename I>
inline void Matrix<T>::rank1Update_mult(const Vector<T>& vec1,
      const Vector<T>& vec1b,
      const SpVector<T,I>& vec2,
      const T alpha) {
   const INTM nn = vec1b.n();
   const INTM size_A = _n/nn;
   Matrix<T> tmp;
   for (INTM i = 0; i<nn; ++i) {
      // tmp is a non-owning view of block i
      tmp.setData(_X+i*size_A*_m,_m,size_A);
      tmp.rank1Update(vec1,vec2,alpha*vec1b[i]);
   }
};
/// Rank-one update A <- A + alpha*vec1*vec2' where both vectors are
/// sparse: only entries on the cross product of the two supports change.
/// BUGFIX: the alpha != 1 branch indexed columns with r[i] (vec1's
/// indices) instead of r2[i] (vec2's indices), so it updated the wrong
/// entries whenever the two supports differed.
template <typename T>
template <typename I>
inline void Matrix<T>::rank1Update(
      const SpVector<T,I>& vec1, const SpVector<T,I>& vec2, const T alpha) {
   I* r = vec1._r;
   T* v = vec1._v;
   T* v2 = vec2._v;
   I* r2 = vec2._r;
   if (alpha == 1.0) {
      for (INTM i = 0; i<vec2._L; ++i) {
         for (INTM j = 0; j<vec1._L; ++j) {
            _X[r2[i]*_m+r[j]] += v[j]*v2[i];
         }
      }
   } else {
      for (INTM i = 0; i<vec2._L; ++i) {
         for (INTM j = 0; j<vec1._L; ++j) {
            _X[r2[i]*_m+r[j]] += alpha*v[j]*v2[i];
         }
      }
   }
};
/// perform A <- A + alpha*vec1*vec2', when vec2 is sparse.
/// Only the columns carrying a non-zero of vec2 are modified:
/// A(:,cols[k]) += (alpha*vals[k]) * vec1.
template <typename T>
template <typename I>
inline void Matrix<T>::rank1Update(
      const Vector<T>& vec1, const SpVector<T,I>& vec2, const T alpha) {
   const I* cols = vec2._r;
   const T* vals = vec2._v;
   Vector<T> column;
   for (INTM k = 0; k < vec2._L; ++k) {
      this->refCol(cols[k], column);
      column.add(vec1, vals[k]*alpha);
   }
};
/// perform A <- A + alpha*vec1*vec1', when vec1 is sparse (symmetric update).
/// Touches only the |L| x |L| sub-grid addressed by vec1's index array.
template <typename T>
template <typename I>
inline void Matrix<T>::rank1Update(
      const SpVector<T,I>& vec1, const T alpha) {
   const I* idx = vec1._r;
   const T* val = vec1._v;
   if (alpha == 1.0) {
      // Common case: avoid the multiplication by alpha.
      for (INTM i = 0; i < vec1._L; ++i) {
         T* colPtr = _X + idx[i]*_m;
         for (INTM j = 0; j < vec1._L; ++j)
            colPtr[idx[j]] += val[j]*val[i];
      }
   } else {
      for (INTM i = 0; i < vec1._L; ++i) {
         T* colPtr = _X + idx[i]*_m;
         for (INTM j = 0; j < vec1._L; ++j)
            colPtr[idx[j]] += alpha*val[j]*val[i];
      }
   }
};
/// compute x, such that b = Ax,
/// Unpreconditioned conjugate-gradient solver. x is used as the starting
/// point and overwritten with the solution; iteration stops when the squared
/// residual norm drops to tol or after itermax iterations.
template <typename T> inline void Matrix<T>::conjugateGradient(
const Vector<T>& b, Vector<T>& x, const T tol, const int itermax) const {
Vector<T> R,P,AP;
// R = b - A*x  (initial residual); P starts as the residual direction.
R.copy(b);
this->mult(x,R,T(-1.0),T(1.0));
P.copy(R);
int k = 0;
T normR = R.nrm2sq();
T alpha;
while (normR > tol && k < itermax) {
this->mult(P,AP);
alpha = normR/P.dot(AP);
x.add(P,alpha);
R.add(AP,-alpha);
T tmp = R.nrm2sq();
// Fletcher–Reeves direction update: P <- R + (tmp/normR)*P.
P.scal(tmp/normR);
normR = tmp;
P.add(R,T(1.0));
++k;
};
};
/// Dump the matrix to a text file, one column per line, in scientific
/// notation with 12 digits of precision. Overwrites any existing file.
template <typename T> inline void Matrix<T>::drop(char* fileName) const {
   std::ofstream f;
   f.precision(12);
   f.flags(std::ios_base::scientific);
   f.open(fileName, ofstream::trunc);
   std::cout << "Matrix written in " << fileName << std::endl;
   for (INTM col = 0; col < _n; ++col) {
      for (INTM row = 0; row < _m; ++row)
         f << _X[col*_m+row] << " ";
      f << std::endl;
   }
   f.close();
};
/// compute a Nadaraya Watson estimator
/// Kernel-regression smoothing within groups: columns sharing the same label
/// in ind are replaced by a Gaussian-kernel weighted average of that group.
/// Group labels are assumed to be in 1..ind.maxval(); does nothing if
/// ind.n() != _n.
template <typename T> inline void Matrix<T>::NadarayaWatson(
const Vector<INTM>& ind, const T sigma) {
if (ind.n() != _n) return;
init_omp(MAX_THREADS);
const INTM Ngroups=ind.maxval();
INTM i;
#pragma omp parallel for private(i)
for (i = 1; i<=Ngroups; ++i) {
// Collect the column indices belonging to group i.
Vector<INTM> indicesGroup(_n);
INTM count = 0;
for (INTM j = 0; j<_n; ++j)
if (ind[j] == i) indicesGroup[count++]=j;
// Xm = dense copy of the group's columns.
Matrix<T> Xm(_m,count);
Vector<T> col, col2;
for (INTM j= 0; j<count; ++j) {
this->refCol(indicesGroup[j],col);
Xm.refCol(j,col2);
col2.copy(col);
}
// Build pairwise squared distances: ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a'b.
Vector<T> norms;
Xm.norm_2sq_cols(norms);
Matrix<T> weights;
Xm.XtX(weights);
weights.scal(T(-2.0));
Vector<T> ones(Xm.n());
ones.set(T(1.0));
weights.rank1Update(ones,norms);
weights.rank1Update(norms,ones);
// Gaussian kernel, then normalize each column so the weights sum to one.
weights.scal(-sigma);
weights.exp();
Vector<T> den;
weights.mult(ones,den);
den.inv();
weights.multDiagRight(den);
// num = Xm * weights holds the smoothed columns; copy them back in place.
Matrix<T> num;
Xm.mult(weights,num);
for (INTM j= 0; j<count; ++j) {
this->refCol(indicesGroup[j],col);
num.refCol(j,col2);
col.copy(col2);
}
}
};
/// make a sparse copy of the current matrix
/// Builds a compressed-sparse-column copy of *this in out: one pass to count
/// non-zeros, one pass to fill the index/value arrays. Ownership of the
/// freshly allocated arrays is transferred to out (_externAlloc=false).
/// NOTE(review): the omp critical sections appear to serialize operator new
/// when called from parallel regions — confirm against the project's
/// threading conventions.
template <typename T> inline void Matrix<T>::toSparse(SpMatrix<T>& out) const {
out.clear();
INTM count=0;
INTM* pB;
#pragma omp critical
{
pB=new INTM[_n+1];
}
// pE aliases pB+1 so that pB[i+1] == pE[i]: column i spans [pB[i], pE[i]).
INTM* pE=pB+1;
// First pass: count the non-zero entries.
for (INTM i = 0; i<_n*_m; ++i)
if (_X[i] != 0) ++count;
INTM* r;
T* v;
#pragma omp critical
{
r=new INTM[count];
v=new T[count];
}
count=0;
// Second pass: record row index and value of each non-zero, column by column.
for (INTM i = 0; i<_n; ++i) {
pB[i]=count;
for (INTM j = 0; j<_m; ++j) {
if (_X[i*_m+j] != 0) {
v[count]=_X[i*_m+j];
r[count++]=j;
}
}
pE[i]=count;
}
out._v=v;
out._r=r;
out._pB=pB;
out._pE=pE;
out._m=_m;
out._n=_n;
out._nzmax=count;
out._externAlloc=false;
};
/// make a sparse copy of the transpose of the current matrix
/// Same two-pass CSC construction as toSparse, but iterating *this row by
/// row, so out holds the transpose: out is _n x _m (_m columns of length _n).
template <typename T> inline void Matrix<T>::toSparseTrans(
SpMatrix<T>& out) {
out.clear();
INTM count=0;
INTM* pB;
#pragma omp critical
{
pB=new INTM[_m+1];
}
// pE aliases pB+1: column i of the transpose spans [pB[i], pE[i]).
INTM* pE=pB+1;
// First pass: count the non-zero entries.
for (INTM i = 0; i<_n*_m; ++i)
if (_X[i] != 0) ++count;
INTM* r;
T* v;
#pragma omp critical
{
r=new INTM[count];
v=new T[count];
}
count=0;
// Second pass: walk row i of *this, which becomes column i of out.
for (INTM i = 0; i<_m; ++i) {
pB[i]=count;
for (INTM j = 0; j<_n; ++j) {
if (_X[i+j*_m] != 0) {
v[count]=_X[j*_m+i];
r[count++]=j;
}
}
pE[i]=count;
}
out._v=v;
out._r=r;
out._pB=pB;
out._pE=pE;
// Dimensions are swapped relative to *this.
out._m=_n;
out._n=_m;
out._nzmax=count;
out._externAlloc=false;
};
/// make a reference of the matrix to a vector vec
/// vec becomes a non-owning flat view (length _m*_n) of this matrix's
/// storage; no data is copied and vec must not outlive the matrix.
template <typename T> inline void Matrix<T>::toVect(
Vector<T>& vec) const {
vec.clear();
vec._externAlloc=true;
vec._n=_n*_m;
vec._X=_X;
};
/* ***********************************
* Implementation of the class Vector
* ***********************************/
/// Empty constructor: null data pointer, zero length, nothing to free.
template <typename T> Vector<T>::Vector() :
_externAlloc(true), _X(NULL), _n(0) { };
/// Constructor. Create a new vector of size n (uninitialized contents).
/// NOTE(review): the omp critical block appears to serialize operator new for
/// use inside parallel regions — confirm against the project's conventions.
template <typename T> Vector<T>::Vector(INTM n) :
_externAlloc(false), _n(n) {
#pragma omp critical
{
_X=new T[_n];
}
};
/// Constructor with existing data: non-owning view over X (not freed here).
template <typename T> Vector<T>::Vector(T* X, INTM n) :
_externAlloc(true), _X(X), _n(n) { };
/// Copy constructor: allocates owned storage and deep-copies vec's data.
template <typename T> Vector<T>::Vector(const Vector<T>& vec) :
_externAlloc(false), _n(vec._n) {
#pragma omp critical
{
_X=new T[_n];
}
cblas_copy<T>(_n,vec._X,1,_X,1);
};
/// Destructor: releases owned storage via clear().
template <typename T> Vector<T>::~Vector() {
clear();
};
/// Print the vector (name, size, then entries) to stderr.
template <typename T> inline void Vector<T>::print(const string& name) const {
std::cerr << name << std::endl;
std::cerr << _n << std::endl;
for (INTM j = 0; j<_n; ++j) {
printf("%10.5g ",static_cast<double>(_X[j]));
}
printf("\n ");
};
/// Write the vector (size, then entries) to the file `name` with 20-digit
/// precision; the name is echoed to stderr.
template <typename T> inline void Vector<T>::dump(const string& name) const {
ofstream f;
f.open(name);
f.precision(20);
std::cerr << name << std::endl;
f << _n << std::endl;
for (INTM j = 0; j<_n; ++j) {
f << static_cast<double>(_X[j]) << " ";
}
f << std::endl;
f.close();
};
/// Print the vector to stdout (double specialization).
template <> inline void Vector<double>::print(const char* name) const {
printf("%s, %d\n",name,(int)_n);
for (INTM i = 0; i<_n; ++i) {
printf("%g ",_X[i]);
}
printf("\n");
};
/// Print the vector to stdout (float specialization).
template <> inline void Vector<float>::print(const char* name) const {
printf("%s, %d\n",name,(int)_n);
for (INTM i = 0; i<_n; ++i) {
printf("%g ",_X[i]);
}
printf("\n");
};
/// Print the vector to stdout (int specialization).
template <> inline void Vector<int>::print(const char* name) const {
printf("%s, %d\n",name,(int)_n);
for (INTM i = 0; i<_n; ++i) {
printf("%d ",_X[i]);
}
printf("\n");
};
/// Print the vector to stdout (bool specialization, entries shown as 0/1).
template <> inline void Vector<bool>::print(const char* name) const {
printf("%s, %d\n",name,(int)_n);
for (INTM i = 0; i<_n; ++i) {
printf("%d ",_X[i] ? 1 : 0);
}
printf("\n");
};
/// returns the index of the largest value
/// (first occurrence wins; assumes _n >= 1 since _X[0] is read unconditionally)
template <typename T> inline INTM Vector<T>::max() const {
INTM imax=0;
T max=_X[0];
for (INTM j = 1; j<_n; ++j) {
T cur = _X[j];
if (cur > max) {
imax=j;
max = cur;
}
}
return imax;
};
/// returns the index of the minimum value
/// (first occurrence wins; assumes _n >= 1)
template <typename T> inline INTM Vector<T>::min() const {
INTM imin=0;
T min=_X[0];
for (INTM j = 1; j<_n; ++j) {
T cur = _X[j];
if (cur < min) {
imin=j;
min = cur;
}
}
return imin;
};
/// returns the maximum value
template <typename T> inline T Vector<T>::maxval() const {
return _X[this->max()];
};
/// returns the minimum value
template <typename T> inline T Vector<T>::minval() const {
return _X[this->min()];
};
/// returns the maximum magnitude (|x_i| of the entry found by fmax())
template <typename T> inline T Vector<T>::fmaxval() const {
return fabs(_X[this->fmax()]);
};
/// returns the minimum magnitude (|x_i| of the entry found by fmin())
template <typename T> inline T Vector<T>::fminval() const {
return fabs(_X[this->fmin()]);
};
/// Resize to n and fill with n logarithmically spaced values from a to b
/// (inclusive): equally spaced exponents between log10(a) and log10(b).
/// Assumes a > 0, b > 0 and n >= 2 (n == 1 leaves _X[0] == pow(10, log10(a))).
template <typename T>
inline void Vector<T>::logspace(const INTM n, const T a, const T b) {
T first=log10(a);
T last=log10(b);
T step = (last-first)/(n-1);
this->resize(n);
_X[0]=first;
for (INTM i = 1; i<_n; ++i)
_X[i]=_X[i-1]+step;
for (INTM i = 0; i<_n; ++i)
_X[i]=pow(T(10.0),_X[i]);
}
/// Number of non-zero entries.
template <typename T>
inline INTM Vector<T>::nnz() const {
INTM sum=0;
for (INTM i = 0; i<_n; ++i)
if (_X[i] != T()) ++sum;
return sum;
};
/// generate logarithmically spaced values
/// Integer specialization: rounds a double logspace down, while forcing the
/// sequence to be strictly increasing; endpoints are exactly a and b.
template <>
inline void Vector<INTM>::logspace(const INTM n, const INTM a, const INTM b) {
Vector<double> tmp(n);
tmp.logspace(n,double(a),double(b));
this->resize(n);
_X[0]=a;
_X[n-1]=b;
for (INTM i = 1; i<_n-1; ++i) {
INTM candidate=static_cast<INTM>(floor(static_cast<double>(tmp[i])));
_X[i]= candidate > _X[i-1] ? candidate : _X[i-1]+1;
}
}
/// returns the index of the value with largest magnitude
template <typename T> inline INTM Vector<T>::fmax() const {
return cblas_iamax<T>(_n,_X,1);
};
/// returns the index of the value with smallest magnitude
template <typename T> inline INTM Vector<T>::fmin() const {
return cblas_iamin<T>(_n,_X,1);
};
/// returns a reference to X[index] (bounds checked only via assert)
template <typename T> inline T& Vector<T>::operator[] (const INTM i) {
assert(i>=0 && i<_n);
return _X[i];
};
/// returns X[index] by value (bounds checked only via assert)
template <typename T> inline T Vector<T>::operator[] (const INTM i) const {
assert(i>=0 && i<_n);
return _X[i];
};
/// make a copy of x
/// Deep copy; the self-copy guard also skips the copy when both vectors
/// already alias the same buffer.
template <typename T> inline void Vector<T>::copy(const Vector<T>& x) {
if (_X != x._X) {
this->resize(x.n());
//cblas_copy<T>(_n,x._X,1,_X,1);
memcpy(_X,x._X,_n*sizeof(T));
}
};
/// make a shallow copy of x: adopt x's pointer/size without copying data.
template <typename T> inline void Vector<T>::copyRef(const Vector<T>& x) {
this->setData(x.rawX(),x.n());
};
/// Set all values to zero
template <typename T> inline void Vector<T>::setZeros() {
memset(_X,0,_n*sizeof(T));
};
/// resize the vector
/// No-op when the size already matches (contents preserved in that case);
/// otherwise frees owned storage and allocates a fresh, owned buffer that is
/// zero-filled unless set_zeros is false.
template <typename T> inline void Vector<T>::resize(const INTM n, const bool set_zeros) {
if (_n == n) return;
clear();
#pragma omp critical
{
_X=new T[n];
}
_n=n;
_externAlloc=false;
if (set_zeros)
this->setZeros();
};
/// change the data of the vector
/// Becomes a non-owning view over X; previously owned storage is released.
template <typename T> inline void Vector<T>::setPointer(T* X, const INTM n) {
clear();
_externAlloc=true;
_X=X;
_n=n;
};
/// fill with uniform random integers in [0, n) (int specialization)
template <> inline void Vector<int>::randi(int n) {
for (int i = 0; i<_n; ++i)
_X[i]=static_cast<int>(random() % n);
};
/// put a random permutation of size n (for integral vectors)
/// Fisher–Yates style draw-without-replacement from a shrinking table.
template <> inline void Vector<int>::randperm(int n) {
resize(n);
Vector<int> table(n);
for (int i = 0; i<n; ++i)
table[i]=i;
int size=n;
for (int i = 0; i<n; ++i) {
// Draw one remaining value, then fill the hole with the table's last entry.
const int ind=random() % size;
_X[i]=table[ind];
table[ind]=table[size-1];
--size;
}
};
/// put random values in the vector (white Gaussian Noise)
template <typename T> inline void Vector<T>::setAleat() {
for (INTM i = 0; i<_n; ++i) _X[i]=normalDistrib<T>();
};
/// clear the vector
/// Frees the buffer only when it is owned; always resets to an empty,
/// non-owning state so clear() is safe to call repeatedly.
template <typename T> inline void Vector<T>::clear() {
if (!_externAlloc) delete[](_X);
_n=0;
_X=NULL;
_externAlloc=true;
};
/// performs soft-thresholding of the vector
/// x_i <- sign(x_i) * max(|x_i| - nu, 0), in place.
template <typename T> inline void Vector<T>::softThrshold(const T nu) {
for (INTM i = 0; i<_n; ++i) {
if (_X[i] > nu) {
_X[i] -= nu;
} else if (_X[i] < -nu) {
_X[i] += nu;
} else {
_X[i] = 0;
}
}
};
/// performs soft-thresholding of the vector, in place,
/// delegating the per-entry work to fastSoftThrs.
template <typename T> inline void Vector<T>::fastSoftThrshold(const T nu) {
//#pragma omp parallel for
for (INTM i = 0; i<_n; ++i)
_X[i]=fastSoftThrs(_X[i],nu);
};
/// performs soft-thresholding of the vector, writing the result into output
/// (resized to match; *this is left unchanged).
template <typename T> inline void Vector<T>::fastSoftThrshold(Vector<T>& output, const T nu) const {
output.resize(_n,false);
//#pragma omp parallel for
for (INTM i = 0; i<_n; ++i)
output[i]=fastSoftThrs(_X[i],nu);
};
/// soft-threshold by nu then scale by s, writing into out:
/// out_i = s * sign(x_i) * max(|x_i| - nu, 0).
/// NOTE(review): out is assumed to be pre-sized to at least _n — confirm at
/// call sites; no resize is performed here.
template <typename T> inline void Vector<T>::softThrsholdScal(Vector<T>& out, const T nu, const T s) {
T* Y = out.rawX();
for (INTM i = 0; i<_n; ++i) {
if (_X[i] > nu) {
Y[i] = s*(_X[i]-nu);
} else if (_X[i] < -nu) {
Y[i] = s*(_X[i]+nu);
} else {
Y[i] = 0;
}
}
};
/// performs hard-thresholding of the vector: zero every entry with
/// |x_i| <= nu, keep the others unchanged.
template <typename T> inline void Vector<T>::hardThrshold(const T nu) {
for (INTM i = 0; i<_n; ++i) {
if (!(_X[i] > nu || _X[i] < -nu)) {
_X[i] = 0;
}
}
};
/// clamp from below: x_i <- max(x_i, nu)
template <typename T> inline void Vector<T>::thrsmax(const T nu) {
//#pragma omp parallel for private(i)
for (INTM i = 0; i<_n; ++i)
if (_X[i] < nu) _X[i]=nu;
}
/// clamp from above: x_i <- min(x_i, nu)
template <typename T> inline void Vector<T>::thrsmin(const T nu) {
for (INTM i = 0; i<_n; ++i)
_X[i]=MIN(_X[i],nu);
}
/// clamp magnitude: x_i <- clamp(x_i, -nu, nu)
template <typename T> inline void Vector<T>::thrsabsmin(const T nu) {
for (INTM i = 0; i<_n; ++i)
_X[i]=MAX(MIN(_X[i],nu),-nu);
}
/// zero out entries of magnitude below nu
template <typename T> inline void Vector<T>::thrshold(const T nu) {
for (INTM i = 0; i<_n; ++i)
if (abs<T>(_X[i]) < nu)
_X[i]=0;
}
/// positive-part projection: x_i <- max(x_i, 0)
template <typename T> inline void Vector<T>::thrsPos() {
for (INTM i = 0; i<_n; ++i) {
if (_X[i] < 0) _X[i]=0;
}
};
/// true iff every entry of the bool vector is true
template <>
inline bool Vector<bool>::alltrue() const {
for (INTM i = 0; i<_n; ++i) {
if (!_X[i]) return false;
}
return true;
};
/// true iff every entry of the bool vector is false
template <>
inline bool Vector<bool>::allfalse() const {
for (INTM i = 0; i<_n; ++i) {
if (_X[i]) return false;
}
return true;
};
/// set each value of the vector to val
template <typename T> inline void Vector<T>::set(const T val) {
for (INTM i = 0; i<_n; ++i) _X[i]=val;
};
/// returns ||A||_2 (Euclidean norm, via BLAS nrm2)
template <typename T> inline T Vector<T>::nrm2() const {
return cblas_nrm2<T>(_n,_X,1);
};
/// returns ||A||_2^2 (computed as the dot product with itself)
template <typename T> inline T Vector<T>::nrm2sq() const {
return cblas_dot<T>(_n,_X,1,_X,1);
};
/// returns A'x
template <typename T> inline T Vector<T>::dot(const Vector<T>& x) const {
assert(_n == x._n);
return cblas_dot<T>(_n,_X,1,x._X,1);
};
/// returns A'x, when x is sparse
/// Gathers only the entries of *this addressed by x's index array.
template <typename T>
template <typename I>
inline T Vector<T>::dot(const SpVector<T,I>& x) const {
T sum=0;
const I* r = x.rawR();
const T* v = x.rawX();
for (INTT i = 0; i<x._L; ++i) {
sum += _X[r[i]]*v[i];
}
return sum;
//return cblas_doti<T>(x._L,x._v,x._r,_X);
};
/// A <- A + a*x (BLAS axpy)
template <typename T> inline void Vector<T>::add(const Vector<T>& x, const T a) {
assert(_n == x._n);
cblas_axpy<T>(_n,a,x._X,1,_X,1);
};
/// A <- b*A + a*x (BLAS axpby)
template <typename T> inline void Vector<T>::add_scal(const Vector<T>& x, const T a, const T b) {
assert(_n == x._n);
cblas_axpby<T>(_n,a,x._X,1,b,_X,1);
};
/// A <- A + a*x, when x is sparse (scatter-add on x's support only)
template <typename T>
template <typename I>
inline void Vector<T>::add(const SpVector<T,I>& x,
const T a) {
if (a == 1.0) {
for (INTM i = 0; i<x._L; ++i)
_X[x._r[i]]+=x._v[i];
} else {
for (INTM i = 0; i<x._L; ++i)
_X[x._r[i]]+=a*x._v[i];
}
};
/// A <- b*A + a*x, when x is sparse
template <typename T>
template <typename I>
inline void Vector<T>::add_scal(const SpVector<T,I>& x,
const T a, const T b) {
// Scale (or zero) the dense part first, then scatter-add the sparse part.
if (b != T(1.0)) {
if (b==0) {
this->setZeros();
} else {
this->scal(b);
}
}
if (a == T(1.0)) {
for (I i = 0; i<x._L; ++i)
_X[x._r[i]]+=x._v[i];
} else {
for (I i = 0; i<x._L; ++i)
_X[x._r[i]]+=a*x._v[i];
}
};
/// adds a to each value in the vector
template <typename T> inline void Vector<T>::add(const T a) {
for (INTM i = 0; i<_n; ++i) _X[i]+=a;
};
/// A <- A - x (element-wise)
template <typename T> inline void Vector<T>::sub(const Vector<T>& x) {
assert(_n == x._n);
vSub<T>(_n,_X,x._X,_X);
};
/// A <- A - x, when x is sparse (subtract on x's support only)
template <typename T>
template <typename I>
inline void Vector<T>::sub(const SpVector<T,I>& x) {
for (INTM i = 0; i<x._L; ++i)
_X[x._r[i]]-=x._v[i];
};
/// A <- A ./ x (element-wise division)
template <typename T> inline void Vector<T>::div(const Vector<T>& x) {
assert(_n == x._n);
vDiv<T>(_n,_X,x._X,_X);
};
/// A <- x ./ y (element-wise division)
template <typename T> inline void Vector<T>::div(const Vector<T>& x, const Vector<T>& y) {
assert(_n == x._n);
vDiv<T>(_n,x._X,y._X,_X);
};
/// A <- x .^ 2 (element-wise square of x; A resized to match)
template <typename T> inline void Vector<T>::sqr(const Vector<T>& x) {
this->resize(x._n);
vSqr<T>(_n,x._X,_X);
}
/// A <- A .^ 2 (element-wise square, in place)
template <typename T> inline void Vector<T>::sqr() {
vSqr<T>(_n,_X,_X);
}
/// A <- 1 ./ sqrt(x) (element-wise; A resized to match)
template <typename T> inline void Vector<T>::Invsqrt(const Vector<T>& x) {
this->resize(x._n);
vInvSqrt<T>(_n,x._X,_X);
}
/// A <- sqrt(x) (element-wise; A resized to match)
template <typename T> inline void Vector<T>::Sqrt(const Vector<T>& x) {
this->resize(x._n);
vSqrt<T>(_n,x._X,_X);
}
/// A <- 1 ./ sqrt(A) (element-wise, in place)
template <typename T> inline void Vector<T>::Invsqrt() {
vInvSqrt<T>(_n,_X,_X);
}
/// A <- sqrt(A) (element-wise, in place)
template <typename T> inline void Vector<T>::Sqrt() {
vSqrt<T>(_n,_X,_X);
}
/// A <- 1./x (element-wise reciprocal; A resized to match)
template <typename T> inline void Vector<T>::inv(const Vector<T>& x) {
this->resize(x.n());
vInv<T>(_n,x._X,_X);
};
/// A <- 1./A (element-wise reciprocal, in place)
template <typename T> inline void Vector<T>::inv() {
vInv<T>(_n,_X,_X);
};
/// A <- x .* y (element-wise product; A resized to x's length)
template <typename T> inline void Vector<T>::mult(const Vector<T>& x,
const Vector<T>& y) {
this->resize(x.n());
vMul<T>(_n,x._X,y._X,_X);
};
;
/// normalize the vector to unit l2 norm (no-op for near-zero vectors)
template <typename T> inline void Vector<T>::normalize() {
T norm=nrm2();
if (norm > EPSILON) scal(1.0/norm);
};
/// project onto the l2 ball of radius thrs (rescale only if the norm exceeds it)
template <typename T> inline void Vector<T>::normalize2(const T thrs) {
T norm=nrm2();
if (norm > thrs) scal(thrs/norm);
};
/// whiten
/// Remove per-channel means. With pattern=true the vector is treated as a
/// sqrt(_n) x sqrt(_n) image and four means are removed according to a 2x2
/// checkerboard pattern (accumulated into meanv[0..3]); otherwise the vector
/// is split into meanv.n() contiguous channels and each channel's mean is
/// subtracted and stored in meanv.
template <typename T> inline void Vector<T>::whiten(
Vector<T>& meanv, const bool pattern) {
if (pattern) {
const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_n)));
INTM count[4];
for (INTM i = 0; i<4; ++i) count[i]=0;
INTM offsetx=0;
// First sweep: accumulate the sum and count of each checkerboard class.
for (INTM j = 0; j<n; ++j) {
offsetx= (offsetx+1) % 2;
INTM offsety=0;
for (INTM k = 0; k<n; ++k) {
offsety= (offsety+1) % 2;
meanv[2*offsetx+offsety]+=_X[j*n+k];
count[2*offsetx+offsety]++;
}
}
for (INTM i = 0; i<4; ++i)
meanv[i] /= count[i];
offsetx=0;
// Second sweep: subtract the class mean from each pixel.
for (INTM j = 0; j<n; ++j) {
offsetx= (offsetx+1) % 2;
INTM offsety=0;
for (INTM k = 0; k<n; ++k) {
offsety= (offsety+1) % 2;
_X[j*n+k]-=meanv[2*offsetx+offsety];
}
}
} else {
const INTM V = meanv.n();
const INTM sizePatch=_n/V;
for (INTM j = 0; j<V; ++j) {
T mean = 0;
for (INTM k = 0; k<sizePatch; ++k) {
mean+=_X[sizePatch*j+k];
}
mean /= sizePatch;
for (INTM k = 0; k<sizePatch; ++k) {
_X[sizePatch*j+k]-=mean;
}
meanv[j]=mean;
}
}
};
/// whiten
/// Masked variant: channel sums are divided by the mask's l1 weight instead
/// of the channel length, and the mean is subtracted only where the mask is
/// non-zero. The per-channel mean is stored in meanv.
template <typename T> inline void Vector<T>::whiten(
Vector<T>& meanv, const Vector<T>& mask) {
const INTM V = meanv.n();
const INTM sizePatch=_n/V;
for (INTM j = 0; j<V; ++j) {
T mean = 0;
for (INTM k = 0; k<sizePatch; ++k) {
mean+=_X[sizePatch*j+k];
}
// Normalize by the effective (masked) channel size, not sizePatch.
mean /= cblas_asum(sizePatch,mask._X+j*sizePatch,1);
for (INTM k = 0; k<sizePatch; ++k) {
if (mask[sizePatch*j+k])
_X[sizePatch*j+k]-=mean;
}
meanv[j]=mean;
}
};
/// whiten: split the vector into V contiguous channels of equal length and
/// subtract each channel's mean in place. Assumes _n is divisible by V.
template <typename T> inline void Vector<T>::whiten(const INTM V) {
   const INTM len = _n/V;
   for (INTM c = 0; c < V; ++c) {
      T* block = _X + c*len;
      T acc = 0;
      for (INTM k = 0; k < len; ++k)
         acc += block[k];
      acc /= len;
      for (INTM k = 0; k < len; ++k)
         block[k] -= acc;
   }
};
/// Generalized Kullback-Leibler divergence between *this and Y:
/// sum_i x_i*log(x_i/y_i) - sum_i x_i + sum_i y_i, with the hard-coded
/// convention sum(x) == 1 (hence the constant -1 term). Near-zero x entries
/// are skipped; a near-zero y against a non-zero x contributes a huge (1e200)
/// penalty instead of infinity.
template <typename T> inline T Vector<T>::KL(const Vector<T>& Y) {
T sum = 0;
T* prY = Y.rawX();
for (INTM i = 0; i<_n; ++i) {
if (_X[i] > 1e-20) {
if (prY[i] < 1e-60) {
sum += 1e200;
} else {
sum += _X[i]*log_alt<T>(_X[i]/prY[i]);
}
//sum += _X[i]*log_alt<T>(_X[i]/(prY[i]+1e-100));
}
}
sum += T(-1.0) + Y.sum();
return sum;
};
/// unwhiten
/// Inverse of whiten(meanv, pattern): adds the stored means back, either via
/// the 2x2 checkerboard pattern (pattern=true) or per contiguous channel.
template <typename T> inline void Vector<T>::unwhiten(
Vector<T>& meanv, const bool pattern) {
if (pattern) {
const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_n)));
INTM offsetx=0;
for (INTM j = 0; j<n; ++j) {
offsetx= (offsetx+1) % 2;
INTM offsety=0;
for (INTM k = 0; k<n; ++k) {
offsety= (offsety+1) % 2;
// Add back the checkerboard-class mean removed by whiten().
_X[j*n+k]+=meanv[2*offsetx+offsety];
}
}
} else {
const INTM V = meanv.n();
const INTM sizePatch=_n/V;
for (INTM j = 0; j<V; ++j) {
T mean = meanv[j];
for (INTM k = 0; k<sizePatch; ++k) {
_X[sizePatch*j+k]+=mean;
}
}
}
};
/// return the mean
template <typename T> inline T Vector<T>::mean() const {
return this->sum()/_n;
}
/// return the mean of the absolute values
template <typename T> inline T Vector<T>::abs_mean() const {
return this->asum()/_n;
};
/// weighted mean: sum_i x_i*qi_i (qi is assumed to carry the weights)
template <typename T> inline T Vector<T>::mean_non_uniform(const Vector<T>& qi) const {
Vector<T> tmp;
tmp.copy(*this);
tmp.mult(qi,tmp);
return tmp.sum();
};
/// return the std
/// Population standard deviation (divides by _n, not _n-1);
/// sqr_alt is presumably a square root — confirm against its definition.
template <typename T> inline T Vector<T>::std() {
T E = this->mean();
T std=0;
for (INTM i = 0; i<_n; ++i) {
T tmp=_X[i]-E;
std += tmp*tmp;
}
std /= _n;
return sqr_alt<T>(std);
}
/// scale the vector by a (BLAS scal)
template <typename T> inline void Vector<T>::scal(const T a) {
return cblas_scal<T>(_n,a,_X,1);
};
/// A <- -A
template <typename T> inline void Vector<T>::neg() {
for (INTM i = 0; i<_n; ++i) _X[i]=-_X[i];
};
/// replace each value by its exponential
template <typename T> inline void Vector<T>::exp() {
vExp<T>(_n,_X,_X);
};
/// replace each value by its absolute value
template <typename T> inline void Vector<T>::abs_vec() {
vAbs<T>(_n,_X,_X);
};
/// replace each value by its logarithm
template <typename T> inline void Vector<T>::log() {
for (INTM i=0; i<_n; ++i) _X[i]=alt_log<T>(_X[i]);
};
/// replace each value x by log(1 + exp(x)) (softplus, via logexp2)
template <typename T> inline void Vector<T>::logexp() {
for (INTM i = 0; i<_n; ++i) {
_X[i]=logexp2(_X[i]);
/*if (_X[i] < -30) {
_X[i]=0;
} else if (_X[i] < 30) {
_X[i]= alt_log<T>( T(1.0) + exp_alt<T>( _X[i] ) );
}*/
}
};
/// numerically stable log(sum(exp(x))): shift by the max before
/// exponentiating. NOTE: mutates *this (left in exp(x - max) form).
template <typename T> inline T Vector<T>::logsumexp() {
T mm=this->maxval();
this->add(-mm);
this->exp();
return mm+alt_log<T>(this->asum());
};
/// Stable evaluation of log(sum_{j != y} exp(x_j - x_y)) with saturation for
/// very large/small gaps. NOTE: mutates *this (entries are shifted by -x_y
/// and may be exponentiated).
template <typename T> inline T Vector<T>::softmax(const int y) {
this->add(-_X[y]);
// Temporarily exclude index y from the max computation.
_X[y]=-INFINITY;
T max=this->maxval();
if (max > 30) {
return max;
} else if (max < -30) {
return 0;
} else {
_X[y]=T(0.0);
this->exp();
return alt_log<T>(this->sum());
}
};
/// computes the sum of the magnitudes of the vector (BLAS asum)
template <typename T> inline T Vector<T>::asum() const {
return cblas_asum<T>(_n,_X,1);
};
/// counts the non-zero entries (pseudo-l0 norm, returned as T)
template <typename T> inline T Vector<T>::lzero() const {
INTM count=0;
for (INTM i = 0; i<_n; ++i)
if (_X[i] != 0) ++count;
return count;
};
/// total variation: sum of |x_i - x_{i-1}| (the "fused" penalty)
template <typename T> inline T Vector<T>::afused() const {
T sum = 0;
for (INTM i = 1; i<_n; ++i) {
sum += abs<T>(_X[i]-_X[i-1]);
}
return sum;
}
/// returns the sum of the vector
template <typename T> inline T Vector<T>::sum() const {
T sum=T();
for (INTM i = 0; i<_n; ++i) sum +=_X[i];
return sum;
};
/// puts in signs, the sign of each poINTM in the vector
/// (signs is assumed pre-sized to _n; zero maps to 0)
template <typename T> inline void Vector<T>::sign(Vector<T>& signs) const {
T* prSign=signs.rawX();
for (INTM i = 0; i<_n; ++i) {
if (_X[i] == 0) {
prSign[i]=0.0;
} else {
prSign[i] = _X[i] > 0 ? 1.0 : -1.0;
}
}
};
/// projects the vector onto the l1 ball of radius thrs,
/// returns true if the returned vector is null
/// Quickselect-style pivot search for the soft-threshold lambda such that
/// ||soft(x, lambda)||_1 = thrs (cf. Duchi et al., ICML 2008). With
/// simplex=true the projection is onto the probability-simplex-like set
/// {u >= 0, ||u||_1 <= thrs}.
template <typename T> inline void Vector<T>::l1project(Vector<T>& out,
const T thrs, const bool simplex) const {
out.copy(*this);
if (simplex) {
out.thrsPos();
} else {
vAbs<T>(_n,out._X,out._X);
}
T norm1 = out.sum();
// Already inside the ball: nothing to threshold.
if (norm1 <= thrs) {
if (!simplex) out.copy(*this);
return;
}
T* prU = out._X;
INTM sizeU = _n;
T sum = T();
INTM sum_card = 0;
// Partition around a pivot; keep the "greater" side when its mass still
// fits under thrs, otherwise recurse into it. Terminates with the sum and
// cardinality of the entries above the optimal threshold.
while (sizeU > 0) {
// put the pivot in prU[0]
swap(prU[0],prU[sizeU/2]);
T pivot = prU[0];
INTM sizeG=1;
T sumG=pivot;
for (INTM i = 1; i<sizeU; ++i) {
if (prU[i] >= pivot) {
sumG += prU[i];
swap(prU[sizeG++],prU[i]);
}
}
if (sum + sumG - pivot*(sum_card + sizeG) <= thrs) {
sum_card += sizeG;
sum += sumG;
prU +=sizeG;
sizeU -= sizeG;
} else {
++prU;
sizeU = sizeG-1;
}
}
T lambda = (sum-thrs)/sum_card;
out.copy(*this);
if (simplex) {
out.thrsPos();
}
out.softThrshold(lambda);
};
/// projects the vector onto the weighted l1 ball of radius thrs
/// (sum_i w_i |u_i| <= thrs). Sorts |x_i|/w_i in decreasing order to find
/// the threshold lambda; with residual=true, out receives x - proj(x)
/// instead of the projection itself.
template <typename T> inline void Vector<T>::l1project_weighted(Vector<T>& out, const Vector<T>& weights,
const T thrs, const bool residual) const {
out.copy(*this);
if (thrs==0) {
out.setZeros();
return;
}
// Work on |x_i| / w_i, sorted decreasing, keeping track of original indices.
vAbs<T>(_n,out._X,out._X);
out.div(weights);
Vector<INTM> keys(_n);
for (INTM i = 0; i<_n; ++i) keys[i]=i;
out.sort2(keys,false);
T sum1=0;
T sum2=0;
T lambda=0;
// Grow the active set until the weighted mass above lambda exceeds thrs,
// then roll back the last step.
for (INTM i = 0; i<_n; ++i) {
const T lambda_old=lambda;
const T fact=weights[keys[i]]*weights[keys[i]];
lambda=out[i];
sum2 += fact;
sum1 += fact*lambda;
if (sum1 - lambda*sum2 >= thrs) {
sum2-=fact;
sum1-=fact*lambda;
lambda=lambda_old;
break;
}
}
lambda=MAX(0,(sum1-thrs)/sum2);
if (residual) {
for (INTM i = 0; i<_n; ++i) {
out._X[i]=_X[i] > 0 ? MIN(_X[i],lambda*weights[i]) : MAX(_X[i],-lambda*weights[i]);
}
} else {
for (INTM i = 0; i<_n; ++i) {
out._X[i]=_X[i] > 0 ? MAX(0,_X[i]-lambda*weights[i]) : MIN(0,_X[i]+lambda*weights[i]);
}
}
};
/// Projection used by the squared-hinge/simplex machinery: shifts the entries
/// associated with positive labels (y[i] > 0), projects onto an l1 ball of
/// radius equal to the number of entries in the minority branch, and shifts
/// back. The sign of the current mean selects which label side is shifted.
template <typename T>
inline void Vector<T>::project_sft_binary(const Vector<T>& y) {
T mean = this->mean();
Vector<T> ztilde, xtilde;
ztilde.resize(_n);
int count=0;
if (mean > 0) {
// Shift the positive-label coordinates up by one before projecting.
for (int ii=0; ii<_n; ++ii)
if (y[ii] > 0) {
count++;
ztilde[ii]=_X[ii]+T(1.0);
} else {
ztilde[ii]= _X[ii];
}
ztilde.l1project(xtilde,T(count));
for (int ii=0; ii<_n; ++ii)
_X[ii] = y[ii] > 0 ? xtilde[ii]-T(1.0) : xtilde[ii];
} else {
// Symmetric case: negate, shift the negative-label coordinates, undo after.
for (int ii=0; ii<_n; ++ii)
if (y[ii] > 0) {
ztilde[ii]=-_X[ii];
} else {
count++;
ztilde[ii]=- _X[ii] + T(1.0);
}
ztilde.l1project(xtilde,T(count));
for (int ii=0; ii<_n; ++ii)
_X[ii] = y[ii] > 0 ? -xtilde[ii] : -xtilde[ii]+T(1.0);
}
};
/// Multi-class wrapper around project_sft_binary: builds a +/-1 label vector
/// (+1 where labels[i] == clas) and delegates. The commented-out block below
/// is an older iterative-thresholding implementation kept for reference.
template <typename T>
inline void Vector<T>::project_sft(const Vector<int>& labels, const int clas) {
Vector<T> y(_n);
for (int ii=0; ii<_n; ++ii) y[ii] = labels[ii]==clas ? T(1.0) : -T(1.0);
this->project_sft_binary(y);
/* T mean = this->mean();
T thrs=mean;
while (abs(mean) > EPSILON) {
INTM n_seuils=0;
for (INTM i = 0; i< _n; ++i) {
_X[i] = _X[i]-thrs;
if (labels[i]==clas) {
if (_X[i] < -1.0) {
_X[i]=-1.0;
++n_seuils;
}
} else {
if (_X[i] < 0) {
++n_seuils;
_X[i]=0;
}
}
}
mean = this->mean();
thrs= mean * _n/(_n-n_seuils);*/
//}
};
/// Dispatch over several sparsity-inducing projections of *this into out;
/// `mode` selects the constraint/penalty combination documented inline below.
/// NOTE: some modes mutate *this temporarily (scal) and restore it afterwards.
template <typename T>
inline void Vector<T>::sparseProject(Vector<T>& out, const T thrs, const int mode, const T lambda1,
const T lambda2, const T lambda3, const bool pos) {
if (mode == 1) {
/// min_u ||b-u||_2^2 / ||u||_1 <= thrs
this->l1project(out,thrs,pos);
} else if (mode == 2) {
/// min_u ||b-u||_2^2 / ||u||_2^2 + lambda1||u||_1 <= thrs
if (lambda1 > 1e-10) {
// Rescale so the elastic-net constraint becomes an l1+l2 projection,
// then undo the scaling on both *this and out.
this->scal(lambda1);
this->l1l2project(out,thrs,2.0/(lambda1*lambda1),pos);
this->scal(T(1.0/lambda1));
out.scal(T(1.0/lambda1));
} else {
// Negligible l1 term: plain l2-ball projection.
out.copy(*this);
out.normalize2();
out.scal(sqrt(thrs));
}
} else if (mode == 3) {
/// min_u ||b-u||_2^2 / ||u||_1 + (lambda1/2) ||u||_2^2 <= thrs
this->l1l2project(out,thrs,lambda1,pos);
} else if (mode == 4) {
/// min_u 0.5||b-u||_2^2 + lambda1||u||_1 / ||u||_2^2 <= thrs
out.copy(*this);
if (pos)
out.thrsPos();
out.softThrshold(lambda1);
T nrm=out.nrm2sq();
if (nrm > thrs)
out.scal(sqr_alt<T>(thrs/nrm));
} else if (mode == 5) {
/// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) / ||u||_2^2 <= thrs
//            this->fusedProject(out,lambda1,lambda2,100);
//            T nrm=out.nrm2sq();
//            if (nrm > thrs)
//               out.scal(sqr_alt<T>(thrs/nrm));
//         } else if (mode == 6) {
/// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) +0.5lambda_3 ||u||_2^2
this->fusedProjectHomotopy(out,lambda1,lambda2,lambda3,true);
} else if (mode==6) {
/// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= thrs
this->fusedProjectHomotopy(out,lambda1/thrs,lambda2/thrs,lambda3/thrs,false);
} else {
/// min_u ||b-u||_2^2 / (1-lambda1)*||u||_2^2 + lambda1||u||_1 <= thrs
if (lambda1 < 1e-10) {
out.copy(*this);
if (pos)
out.thrsPos();
out.normalize2();
out.scal(sqrt(thrs));
} else if (lambda1 > 0.999999) {
this->l1project(out,thrs,pos);
} else {
// Reduce the convex-combination constraint to mode 2.
this->sparseProject(out,thrs/(1.0-lambda1),2,lambda1/(1-lambda1),0,0,pos);
}
}
};
/// returns true if the returned vector is null
/// Elastic-net style projection dispatcher; `mode` selects the constraint,
/// documented inline. Mode 1 temporarily rescales *this and restores it.
template <typename T>
inline void Vector<T>::l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos,
const int mode) {
if (mode == 1) {
/// min_u ||b-u||_2^2 / ||u||_2^2 + gamma ||u||_1 <= thrs
this->scal(gamma);
this->l1l2project(out,thrs,2.0/(gamma*gamma),pos);
this->scal(T(1.0/gamma));
out.scal(T(1.0/gamma));
} else if (mode == 2) {
/// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs
this->l1l2project(out,thrs,gamma,pos);
} else if (mode == 3) {
/// min_u 0.5||b-u||_2^2 + gamma||u||_1 / ||u||_2^2 <= thrs
out.copy(*this);
if (pos)
out.thrsPos();
out.softThrshold(gamma);
T nrm=out.nrm2();
if (nrm > thrs)
out.scal(thrs/nrm);
}
}
/// returns true if the returned vector is null
/// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs
/// Pivot-partition search (same scheme as l1project) generalized to the
/// elastic-net constraint; the optimal multiplier lambda is then the positive
/// root of a quadratic in (sum, sum_card), and the solution is
/// soft(b, lambda)/(1 + lambda*gamma).
template <typename T>
inline void Vector<T>::l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos) const {
// gamma == 0 degenerates to the plain l1-ball projection.
if (gamma == 0)
return this->l1project(out,thrs,pos);
out.copy(*this);
if (pos) {
out.thrsPos();
} else {
vAbs<T>(_n,out._X,out._X);
}
T norm = out.sum() + gamma*out.nrm2sq();
// Already feasible: return the input (or its positive part).
if (norm <= thrs) {
if (!pos) out.copy(*this);
return;
}
/// BEGIN
T* prU = out._X;
INTM sizeU = _n;
T sum = 0;
INTM sum_card = 0;
while (sizeU > 0) {
// put the pivot in prU[0]
swap(prU[0],prU[sizeU/2]);
T pivot = prU[0];
INTM sizeG=1;
// Each retained entry contributes value + 0.5*gamma*value^2 to the mass.
T sumG=pivot+0.5*gamma*pivot*pivot;
for (INTM i = 1; i<sizeU; ++i) {
if (prU[i] >= pivot) {
sumG += prU[i]+0.5*gamma*prU[i]*prU[i];
swap(prU[sizeG++],prU[i]);
}
}
if (sum + sumG - pivot*(1+0.5*gamma*pivot)*(sum_card + sizeG) <
thrs*(1+gamma*pivot)*(1+gamma*pivot)) {
sum_card += sizeG;
sum += sumG;
prU +=sizeG;
sizeU -= sizeG;
} else {
++prU;
sizeU = sizeG-1;
}
}
// Solve a*lambda^2 + b*lambda + c = 0 for the threshold lambda (positive root).
T a = gamma*gamma*thrs+0.5*gamma*sum_card;
T b = 2*gamma*thrs+sum_card;
T c=thrs-sum;
T delta = b*b-4*a*c;
T lambda = (-b+sqrt(delta))/(2*a);
out.copy(*this);
if (pos) {
out.thrsPos();
}
out.fastSoftThrshold(lambda);
out.scal(T(1.0/(1+lambda*gamma)));
};
/// Helper for the fused-lasso homotopy: returns the direction contribution
/// determined by three consecutive sign bits and the two spacing
/// coefficients c1, c2. The value depends only on the sign pattern:
///   (s1,s2)=(T,T): 0 or c2      (s1,s2)=(T,F): -c2-c1 or -c1
///   (s1,s2)=(F,T): c1 or c1+c2  (s1,s2)=(F,F): -c2 or 0
/// where the first alternative is taken when sign3 is true.
template <typename T>
static inline T fusedHomotopyAux(const bool& sign1,
      const bool& sign2,
      const bool& sign3,
      const T& c1,
      const T& c2) {
   if (sign2) {
      if (sign1)
         return sign3 ? 0 : c2;
      else
         return sign3 ? c1 : c1+c2;
   } else {
      if (sign1)
         return sign3 ? -c2-c1 : -c1;
      else
         return sign3 ? -c2 : 0;
   }
};
/// Fused-lasso proximal operator / projection via a simplified LARS homotopy
/// on the regularization path of the fused penalty. With penalty=true it
/// solves   min_u ||b-u||_2^2 + lambda1||u||_1 + lambda2 Fused(u)
///                              + 0.5*lambda3||u||_2^2,
/// otherwise it stops when the constrained formulation's budget reaches 1.
/// NOTE: *this is consumed as the working residual (DtR) and is mutated.
template <typename T>
inline void Vector<T>::fusedProjectHomotopy(Vector<T>& alpha,
const T lambda1,const T lambda2,const T lambda3,
const bool penalty) {
T* pr_DtR=_X;
const INTM K = _n;
alpha.setZeros();
Vector<T> u(K); // regularization path for gamma
Vector<T> Du(K); // regularization path for alpha
Vector<T> DDu(K); // regularization path for alpha
Vector<T> gamma(K); // auxiliary variable
Vector<T> c(K); // auxiliary variables
Vector<T> scores(K); // auxiliary variables
gamma.setZeros();
T* pr_gamma = gamma.rawX();
T* pr_u = u.rawX();
T* pr_Du = Du.rawX();
T* pr_DDu = DDu.rawX();
T* pr_c = c.rawX();
T* pr_scores = scores.rawX();
// ind holds the sorted active breakpoints; K acts as a sentinel.
Vector<INTM> ind(K+1);
Vector<bool> signs(K);
ind.set(K);
INTM* pr_ind = ind.rawX();
bool* pr_signs = signs.rawX();
/// Computation of DtR
T sumBeta = this->sum();
/// first element is selected, gamma and alpha are updated
pr_gamma[0]=sumBeta/K;
/// update alpha
alpha.set(pr_gamma[0]);
/// update DtR
this->sub(alpha);
// Suffix sums turn the residual into D'R for the fused difference operator.
for (INTM j = K-2; j>=0; --j)
pr_DtR[j] += pr_DtR[j+1];
pr_DtR[0]=0;
pr_ind[0]=0;
pr_signs[0] = pr_DtR[0] > 0;
pr_c[0]=T(1.0)/K;
INTM currentInd=this->fmax();
T currentLambda=abs<T>(pr_DtR[currentInd]);
bool newAtom = true;
/// Solve the Lasso using simplified LARS
for (INTM i = 1; i<K; ++i) {
/// exit if constraINTMs are satisfied
/// min_u ||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2
if (penalty && currentLambda <= lambda2) break;
if (!penalty) {
/// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= 1.0
scores.copy(alpha);
scores.softThrshold(lambda1*currentLambda/lambda2);
scores.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2)));
if (lambda1*scores.asum()+lambda2*scores.afused()+0.5*
lambda3*scores.nrm2sq() >= T(1.0)) break;
}
/// Update pr_ind and pr_c
if (newAtom) {
// Insert currentInd into the sorted active list, shifting the tail.
INTM j;
for (j = 1; j<i; ++j)
if (pr_ind[j] > currentInd) break;
for (INTM k = i; k>j; --k) {
pr_ind[k]=pr_ind[k-1];
pr_c[k]=pr_c[k-1];
pr_signs[k]=pr_signs[k-1];
}
pr_ind[j]=currentInd;
pr_signs[j]=pr_DtR[currentInd] > 0;
pr_c[j-1]=T(1.0)/(pr_ind[j]-pr_ind[j-1]);
pr_c[j]=T(1.0)/(pr_ind[j+1]-pr_ind[j]);
}
// Compute u
pr_u[0]= pr_signs[1] ? -pr_c[0] : pr_c[0];
if (i == 1) {
pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1];
} else {
pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1];
pr_u[1]+=pr_signs[2] ? -pr_c[1] : pr_c[1];
for (INTM j = 2; j<i; ++j) {
pr_u[j]=2*fusedHomotopyAux<T>(pr_signs[j-1],
pr_signs[j],pr_signs[j+1], pr_c[j-1],pr_c[j]);
}
pr_u[i] = pr_signs[i-1] ? -pr_c[i-1] : pr_c[i-1];
pr_u[i] += pr_signs[i] ? pr_c[i-1]+pr_c[i] : -pr_c[i-1]-pr_c[i];
}
// Compute Du
// Du is piecewise constant between consecutive active breakpoints.
pr_Du[0]=pr_u[0];
for (INTM k = 1; k<pr_ind[1]; ++k)
pr_Du[k]=pr_Du[0];
for (INTM j = 1; j<=i; ++j) {
pr_Du[pr_ind[j]]=pr_Du[pr_ind[j]-1]+pr_u[j];
for (INTM k = pr_ind[j]+1; k<pr_ind[j+1]; ++k)
pr_Du[k]=pr_Du[pr_ind[j]];
}
/// Compute DDu
DDu.copy(Du);
for (INTM j = K-2; j>=0; --j)
pr_DDu[j] += pr_DDu[j+1];
/// Check constraINTMs
T max_step1 = INFINITY;
if (penalty) {
max_step1 = currentLambda-lambda2;
}
/// Check changes of sign
T max_step2 = INFINITY;
INTM step_out = -1;
for (INTM j = 1; j<=i; ++j) {
T ratio = -pr_gamma[pr_ind[j]]/pr_u[j];
if (ratio > 0 && ratio <= max_step2) {
max_step2=ratio;
step_out=j;
}
}
T max_step3 = INFINITY;
/// Check new variables entering the active set
for (INTM j = 1; j<K; ++j) {
T sc1 = (currentLambda-pr_DtR[j])/(T(1.0)-pr_DDu[j]);
T sc2 = (currentLambda+pr_DtR[j])/(T(1.0)+pr_DDu[j]);
if (sc1 <= 1e-10) sc1=INFINITY;
if (sc2 <= 1e-10) sc2=INFINITY;
pr_scores[j]= MIN(sc1,sc2);
}
// Active indices cannot re-enter; mask them out of the search.
for (INTM j = 0; j<=i; ++j) {
pr_scores[pr_ind[j]]=INFINITY;
}
currentInd = scores.fmin();
max_step3 = pr_scores[currentInd];
T step = MIN(max_step1,MIN(max_step3,max_step2));
if (step == 0 || step == INFINITY) break;
/// Update gamma, alpha, DtR, currentLambda
for (INTM j = 0; j<=i; ++j) {
pr_gamma[pr_ind[j]]+=step*pr_u[j];
}
alpha.add(Du,step);
this->add(DDu,-step);
currentLambda -= step;
if (step == max_step2) {
/// Update signs,pr_ind, pr_c
// An active coefficient hit zero: remove it from the active set.
for (INTM k = step_out; k<=i; ++k)
pr_ind[k]=pr_ind[k+1];
pr_ind[i]=K;
for (INTM k = step_out; k<=i; ++k)
pr_signs[k]=pr_signs[k+1];
pr_c[step_out-1]=T(1.0)/(pr_ind[step_out]-pr_ind[step_out-1]);
pr_c[step_out]=T(1.0)/(pr_ind[step_out+1]-pr_ind[step_out]);
i-=2;
newAtom=false;
} else {
newAtom=true;
}
}
// Final prox step: soft-threshold and shrink alpha by the l1/l2 terms.
if (penalty) {
alpha.softThrshold(lambda1);
alpha.scal(T(1.0/(1.0+lambda3)));
} else {
alpha.softThrshold(lambda1*currentLambda/lambda2);
alpha.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2)));
}
};
/// Iterative solver for the fused-lasso signal-approximation problem.
/// `_X` (this) holds the input signal; `alpha` is refined in place for
/// `itermax` cyclic sweeps over the difference variables
/// gamma_j = alpha_j - alpha_{j-1} (each soft-thresholded by lambda2),
/// then soft-thresholded by lambda1 for the l1 part.
template <typename T>
inline void Vector<T>::fusedProject(Vector<T>& alpha, const T lambda1, const T lambda2,
      const int itermax) {
   T* pr_alpha= alpha.rawX();
   T* pr_beta=_X;
   const INTM K = alpha.n();
   T total_alpha =alpha.sum();
   /// Modification of beta: turn beta into suffix sums, beta[i] = sum_{j>=i} beta[j]
   for (INTM i = K-2; i>=0; --i)
      pr_beta[i]+=pr_beta[i+1];
   for (INTM i = 0; i<itermax; ++i) {
      T sum_alpha=0;
      T sum_diff = 0;
      /// Update first coordinate (no fused penalty on the first difference)
      T gamma_old=pr_alpha[0];
      pr_alpha[0]=(K*gamma_old+pr_beta[0]-
            total_alpha)/K;
      T diff = pr_alpha[0]-gamma_old;
      sum_diff += diff;
      sum_alpha += pr_alpha[0];
      total_alpha +=K*diff;
      /// Update alpha_j for j>=1: each difference variable is updated in
      /// closed form with a lambda2 soft-threshold; the running sums
      /// (sum_diff, sum_alpha, total_alpha) keep each update O(1).
      for (INTM j = 1; j<K; ++j) {
         pr_alpha[j]+=sum_diff;
         T gamma_old=pr_alpha[j]-pr_alpha[j-1];
         T gamma_new=softThrs((K-j)*gamma_old+pr_beta[j]-
               (total_alpha-sum_alpha),lambda2)/(K-j);
         pr_alpha[j]=pr_alpha[j-1]+gamma_new;
         T diff = gamma_new-gamma_old;
         sum_diff += diff;
         sum_alpha+=pr_alpha[j];
         total_alpha +=(K-j)*diff;
      }
   }
   /// final l1 shrinkage
   alpha.softThrshold(lambda1);
};
/// sort the vector in place; mode selects the ordering passed to the
/// project's lasrt routine (true -> incr, false -> decr)
template <typename T>
inline void Vector<T>::sort(const bool mode) {
   if (mode) {
      lasrt<T>(incr,_n,_X);
   } else {
      lasrt<T>(decr,_n,_X);
   }
};
/// out-of-place sort: out receives a sorted copy, *this is untouched
template <typename T>
inline void Vector<T>::sort(Vector<T>& out, const bool mode) const {
   out.copy(*this);
   out.sort(mode);
};
/// sort the values in place and apply the same permutation to key
template <typename T>
inline void Vector<T>::sort2(Vector<INTM>& key, const bool mode) {
   quick_sort(key.rawX(),_X,(INTM)0,_n-1,mode);
};
/// out-of-place variant of sort2; *this is untouched
template <typename T>
inline void Vector<T>::sort2(Vector<T>& out, Vector<INTM>& key, const bool mode) const {
   out.copy(*this);
   out.sort2(key,mode);
}
/// Zero out, per color plane, the samples a Bayer color-filter array
/// would not measure.  The vector is interpreted as three stacked n x n
/// planes (R, G, B) with n = sqrt(_n/3); `offset` in {0,1,2,3} selects
/// one of the four phases of the 2x2 Bayer tile.  For each plane and
/// row parity, (step, off) give the stride and start column of the
/// entries to erase.
template <typename T>
inline void Vector<T>::applyBayerPattern(const int offset) {
   INTM sizePatch=_n/3;
   INTM n = static_cast<INTM>(sqrt(static_cast<T>(sizePatch)));
   if (offset == 0) {
      // R
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 1 : 2;
         const INTM off = (i % 2) ? 0 : 1;
         for (INTM j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (INTM i = 0; i<n; ++i) {
         const INTM step = 2;
         const INTM off = (i % 2) ? 1 : 0;
         for (INTM j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 2 : 1;
         const INTM off = 0;
         for (INTM j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 1) {
      // R
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 2 : 1;
         const INTM off = (i % 2) ? 1 : 0;
         for (INTM j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (INTM i = 0; i<n; ++i) {
         const INTM step = 2;
         const INTM off = (i % 2) ? 0 : 1;
         for (INTM j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 1 : 2;
         const INTM off = 0;
         for (INTM j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 2) {
      // R
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 1 : 2;
         const INTM off = 0;
         for (INTM j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (INTM i = 0; i<n; ++i) {
         const INTM step = 2;
         const INTM off = (i % 2) ? 0 : 1;
         for (INTM j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 2 : 1;
         const INTM off = (i % 2) ? 1 : 0;
         for (INTM j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 3) {
      // R
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 2 : 1;
         const INTM off = 0;
         for (INTM j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (INTM i = 0; i<n; ++i) {
         const INTM step = 2;
         const INTM off = (i % 2) ? 1 : 0;
         for (INTM j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (INTM i = 0; i<n; ++i) {
         const INTM step = (i % 2) ? 1 : 2;
         const INTM off = (i % 2) ? 0 : 1;
         for (INTM j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   }
};
/// Gather the non-zero entries of this dense vector into vec
/// (vec's buffers are assumed large enough; its length _L is updated).
template <typename T> inline void Vector<T>::toSparse(
      SpVector<T>& vec) const {
   T* values = vec._v;
   INTM* rows = vec._r;
   INTM count = 0;
   for (INTM i = 0; i < _n; ++i) {
      const T x = _X[i];
      if (x != T()) {
         values[count] = x;
         rows[count] = i;
         ++count;
      }
   }
   vec._L = count;
};
template <typename T>
inline void Vector<T>::copyMask(Vector<T>& out, Vector<bool>& mask) const {
   /// Compact the entries selected by mask into out; out is then shrunk
   /// (via setn) to the number of selected entries.
   out.resize(_n);
   INTM n_kept = 0;
   for (INTM i = 0; i < _n; ++i)
      if (mask[i])
         out[n_kept++] = _X[i];
   out.setn(n_kept);
};
/// Keep only the rows selected by mask: out becomes a (#selected) x _n
/// matrix.  out is first resized to _m x _n and then its row count is
/// shrunk with setm(), so the underlying buffer keeps _m*_n capacity.
template <typename T>
inline void Matrix<T>::copyMask(Matrix<T>& out, Vector<bool>& mask) const {
   out.resize(_m,_n);
   INTM count=0;
   for (INTM i = 0; i<mask.n(); ++i)
      if (mask[i])
         ++count;
   out.setm(count);
   // Compact each column: masked entries are copied contiguously using
   // `count` as the new leading dimension of out.
   for (INTM i = 0; i<_n; ++i) {
      INTM pointer=0;
      for (INTM j = 0; j<_m; ++j) {
         if (mask[j]) {
            out[i*count+pointer]=_X[i*_m+j];
            ++pointer;
         }
      }
   }
};
/* ****************************
 * Implementation of SpMatrix
 * ****************************/
/// Constructor, CSC format, existing data: the matrix only references the
/// caller's arrays (_externAlloc=true) and will not free them.
template <typename T, typename I> SpMatrix<T,I>::SpMatrix(T* v, I* r, I* pB, I* pE,
      I m, I n, I nzmax) :
   _externAlloc(true), _v(v), _r(r), _pB(pB), _pE(pE), _m(m), _n(n), _nzmax(nzmax)
   { };
/// Constructor, new m x n matrix, with at most nzmax non-zeros values.
/// NOTE(review): allocation is wrapped in `omp critical`, presumably
/// because concurrent news caused issues in this codebase -- confirm.
template <typename T, typename I> SpMatrix<T,I>::SpMatrix(I m, I n, I nzmax) :
   _externAlloc(false), _m(m), _n(n), _nzmax(nzmax) {
#pragma omp critical
      {
         _v=new T[nzmax];
         _r=new I[nzmax];
         _pB=new I[_n+1];
      }
      // column-end pointers alias column-begin pointers shifted by one
      _pE=_pB+1;
   };
/// Empty constructor: references nothing, owns nothing
template <typename T, typename I> SpMatrix<T,I>::SpMatrix() :
   _externAlloc(true), _v(NULL), _r(NULL), _pB(NULL), _pE(NULL),
   _m(0),_n(0),_nzmax(0) { };
/// Deep copy of mat: resizes to mat's geometry and copies values, row
/// indices and the column-pointer array (the full _nzmax capacity is
/// copied, not just the used portion).
template <typename T, typename I>
inline void SpMatrix<T,I>::copy(const SpMatrix<T,I>& mat) {
   this->resize(mat._m,mat._n,mat._nzmax);
   memcpy(_v,mat._v,_nzmax*sizeof(T));
   memcpy(_r,mat._r,_nzmax*sizeof(I));
   memcpy(_pB,mat._pB,(_n+1)*sizeof(I));
}
/// Destructor: releases owned buffers via clear()
template <typename T, typename I> SpMatrix<T,I>::~SpMatrix() {
   clear();
};
/// Make vec a non-owning view of column i (no copy); any buffer vec
/// previously owned is released first.
template <typename T, typename I> inline void SpMatrix<T,I>::refCol(I i,
      SpVector<T,I>& vec) const {
   if (vec._nzmax > 0) vec.clear();
   vec._v=_v+_pB[i];
   vec._r=_r+_pB[i];
   vec._externAlloc=true;
   vec._L=_pE[i]-_pB[i];
   vec._nzmax=vec._L;
};
/// print the sparse matrix to std::cerr: header (name, dimensions,
/// capacity) followed by one "(row,col) = value" line per stored entry
template<typename T, typename I> inline void SpMatrix<T,I>::print(const string& name) const {
   cerr << name << endl;
   cerr << _m << " x " << _n << " , " << _nzmax << endl;
   for (I i = 0; i<_n; ++i) {
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         cerr << "(" <<_r[j] << "," << i << ") = " << _v[j] << endl;
      }
   }
};
template<typename T, typename I>
inline T SpMatrix<T,I>::operator[](const I index) const {
   /// Random access by linear column-major index; returns 0 when the
   /// entry is not stored.  Linear scan of the column's non-zeros.
   const I col = index / _m;
   const I row = index - col * _m;
   for (I j = _pB[col]; j < _pB[col+1]; ++j)
      if (_r[j] == row)
         return _v[j];
   return 0;
};
/// Densify column `index` into data (zeros filled in)
template<typename T, typename I>
void SpMatrix<T,I>::getData(Vector<T>& data, const I index) const {
   data.resize(_m);
   data.setZeros();
   for (I i = _pB[index]; i< _pB[index+1]; ++i)
      data[_r[i]]=_v[i];
};
/// Rebind this matrix to caller-owned CSC arrays; any previously owned
/// storage is released and ownership is NOT taken (_externAlloc=true).
template <typename T, typename I>
void SpMatrix<T,I>::setData(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax) {
   this->clear();
   _externAlloc =true;
   _v = v;
   _r=r;
   _pB=pB;
   _pE=pE;
   _m=m;
   _n=n;
   _nzmax=nzmax;
}
/// compute the sum of the magnitudes of the stored values (l1 norm of _v)
template <typename T, typename I> inline T SpMatrix<T,I>::asum() const {
   return cblas_asum<T>(_pB[_n],_v,1);
};
/// compute the squared Frobenius norm (sum of squared stored values)
template <typename T, typename I> inline T SpMatrix<T,I>::normFsq() const {
   return cblas_dot<T>(_pB[_n],_v,1,_v,1);
};
/// this += a*mat on the raw value arrays; valid only when both matrices
/// share the same sparsity pattern (indices are not consulted)
template <typename T, typename I>
inline void SpMatrix<T,I>::add_direct(const SpMatrix<T,I>& mat, const T a) {
   Vector<T> v2(mat._v,mat._nzmax);
   Vector<T> v1(_v,_nzmax);
   v1.add(v2,a);
}
/// copy mat's values onto ours, again assuming identical sparsity pattern
template <typename T, typename I>
inline void SpMatrix<T,I>::copy_direct(const SpMatrix<T,I>& mat) {
   Vector<T> v2(mat._v,_pB[_n]);
   Vector<T> v1(_v,_pB[_n]);
   v1.copy(v2);
}
/// inner product of the value arrays, assuming identical sparsity pattern
template <typename T, typename I>
inline T SpMatrix<T,I>::dot_direct(const SpMatrix<T,I>& mat) const {
   Vector<T> v2(mat._v,_pB[_n]);
   Vector<T> v1(_v,_pB[_n]);
   return v1.dot(v2);
}
/// clear the matrix: free owned buffers (never external ones) and reset
/// to the empty, non-owning state
template <typename T, typename I> inline void SpMatrix<T,I>::clear() {
   if (!_externAlloc) {
      delete[](_r);
      delete[](_v);
      delete[](_pB);
   }
   _n=0;
   _m=0;
   _nzmax=0;
   _v=NULL;
   _r=NULL;
   _pB=NULL;
   _pE=NULL;
   _externAlloc=true;
};
/// resize the matrix; no-op when the geometry already matches.  The data
/// is NOT preserved: buffers are reallocated and column pointers zeroed.
template <typename T, typename I> inline void SpMatrix<T,I>::resize(const I m,
      const I n, const I nzmax) {
   if (n == _n && m == _m && nzmax == _nzmax) return;
   this->clear();
   _n=n;
   _m=m;
   _nzmax=nzmax;
   _externAlloc=false;
#pragma omp critical
   {
      _v = new T[nzmax];
      _r = new I[nzmax];
      _pB = new I[_n+1];
   }
   // column-end pointers alias column-begin pointers shifted by one
   _pE = _pB+1;
   for (I i = 0; i<=_n; ++i) _pB[i]=0;
};
/// scale all stored values by a (const-qualified but mutates _v)
template <typename T, typename I> inline void SpMatrix<T,I>::scal(const T a) const {
   cblas_scal<T>(_pB[_n],a,_v,1);
};
/// mean of the magnitudes of the stored values
template <typename T, typename I> inline T SpMatrix<T,I>::abs_mean() const {
   Vector<T> vec(_v,_pB[_n]);
   return vec.abs_mean();
};
/// y <- alpha*A'*x + beta*y (y is resized to _n first)
template <typename T, typename I>
inline void SpMatrix<T,I>::multTrans(const Vector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_n);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();  // beta == 0: never read the (possibly fresh) buffer
   }
   const T* prX = x.rawX();
#pragma omp parallel for
   for (I i = 0; i<_n; ++i) {
      // y[i] accumulates the dot product of column i of A with x
      T sum=T();
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         sum+=_v[j]*prX[_r[j]];
      }
      y[i] += alpha*sum;
   }
};
/// y <- alpha*A'*x + beta*y, x sparse: y[i] = alpha * <A(:,i), x>
template <typename T, typename I>
inline void SpMatrix<T,I>::multTrans(const SpVector<T,I>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_n);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   T* prY = y.rawX();
   SpVector<T,I> col;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);  // non-owning view, no allocation per column
      prY[i] += alpha*x.dot(col);
   }
};
/// y <- alpha*A*x + beta*y (y is resized to _m first); column-wise
/// scatter: each column i contributes alpha*x[i]*A(:,i)
template <typename T, typename I>
inline void SpMatrix<T,I>::mult(const Vector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_m);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   const T* prX = x.rawX();
   for (I i = 0; i<_n; ++i) {
      T sca=alpha* prX[i];
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         y[_r[j]] += sca*_v[j];
      }
   }
};
/// y <- alpha*A*x + beta*y, x sparse: only the columns of A indexed by
/// x's non-zeros contribute
template <typename T, typename I>
inline void SpMatrix<T,I>::mult(const SpVector<T,I>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_m);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   T* prY = y.rawX();
   for (I i = 0; i<x.L(); ++i) {
      I ind=x.r(i);
      T val = alpha * x.v(i);
      for (I j = _pB[ind]; j<_pE[ind]; ++j) {
         prY[_r[j]] += val *_v[j];
      }
   }
};
/// perform C = a*op(A)*op(B) + b*C with A sparse (this) and B dense;
/// op() optionally transposes.  Each of the four transpose combinations
/// reduces the product to column/row-level primitives.
template <typename T, typename I>
inline void SpMatrix<T,I>::mult(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   if (transA) {
      if (transB) {
         // C = a*A'*B' + b*C: row i of C is a * (B * A(:,i))'
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.m());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         // C = a*A'*B + b*C: row i of C is a * (B' * A(:,i))'
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.n());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         // C = a*A*B' + b*C: column i of C is a * A * (row i of B)
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> row(B.n());
         Vector<T> col;
         for (I i = 0; i<B.m(); ++i) {
            B.copyRow(i,row);
            C.refCol(i,col);
            this->mult(row,col,a,T(1.0));
         }
      } else {
         // C = a*A*B + b*C: column i of C is a * A * B(:,i)
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colB;
         Vector<T> colC;
         for (I i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            this->mult(colB,colC,a,T(1.0));
         }
      }
   }
};
/// perform C = a*op(A)*op(B) + b*C with both A (this) and B sparse;
/// C is always dense.
template <typename T, typename I>
inline void SpMatrix<T,I>::mult(const SpMatrix<T,I>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   if (transA) {
      if (transB) {
         // C = a*A'*B' + b*C: row i of C is a * (B * A(:,i))'
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.m());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         // C = a*A'*B + b*C: row i of C is a * (B' * A(:,i))'
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.n());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         // C = a*A*B' + b*C = b*C + a * sum_i A(:,i) * B(:,i)'
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> colB;
         SpVector<T,I> colA;
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         // C = a*A*B + b*C: column i of C is a * A * B(:,i)
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> colB;
         Vector<T> colC;
         for (I i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            this->mult(colB,colC,a);
         }
      }
   }
};
/// perform C = a*op(B)*op(A) + b*C by delegating to B.mult with the
/// operands (and transpose flags) swapped
template <typename T, typename I>
inline void SpMatrix<T,I>::multSwitch(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   B.mult(*this,C,transB,transA,a,b);
};
/// Inner product <A, X> = sum_{i,j} A(i,j)*X(i,j) between this sparse
/// matrix and a dense matrix assumed to have the same dimensions.
template <typename T, typename I>
inline T SpMatrix<T,I>::dot(const Matrix<T>& x) const {
   T sum=0;
   for (I i = 0; i<_n; ++i)
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         // Fix: the column index is i (the current column), not j (the
         // running non-zero index).  The previous x(_r[j],j) read the
         // wrong column and went out of bounds whenever the matrix holds
         // more non-zeros than columns.
         sum+=_v[j]*x(_r[j],i);
      }
   return sum;
};
/// Densify row `ind` into x (length _n, zeros filled in).  Exploits
/// sorted row indices within each column to stop scanning early.
template <typename T, typename I>
inline void SpMatrix<T,I>::copyRow(const I ind, Vector<T>& x) const {
   x.resize(_n);
   x.setZeros();
   for (I i = 0; i<_n; ++i) {
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         if (_r[j]==ind) {
            x[i]=_v[j];
         } else if (_r[j] > ind) {
            break;  // indices are sorted: row ind is absent from column i
         }
      }
   }
};
/// For every stored entry (r,c), add a*vec[r]; i.e. add a*vec to each
/// column, but only at the positions that are already non-zero.
template <typename T, typename I>
inline void SpMatrix<T,I>::addVecToCols(
      const Vector<T>& vec, const T a) {
   const T* pr_vec = vec.rawX();
   if (isEqual(a,T(1.0))) {  // specialize a == 1 to skip the multiply
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += pr_vec[_r[j]];
   } else {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += a*pr_vec[_r[j]];
   }
};
/// Same as addVecToCols but each entry is further scaled by a weight
/// indexed by the entry's position within its column (j-_pB[i]).
template <typename T, typename I>
inline void SpMatrix<T,I>::addVecToColsWeighted(
      const Vector<T>& vec, const T* weights, const T a) {
   const T* pr_vec = vec.rawX();
   if (isEqual(a,T(1.0))) {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += pr_vec[_r[j]]*weights[j-_pB[i]];
   } else {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += a*pr_vec[_r[j]]*weights[j-_pB[i]];
   }
};
/// sum <- row-wise sum over all columns (a dense _m-vector)
template <typename T, typename I>
inline void SpMatrix<T,I>::sum_cols(Vector<T>& sum) const {
   sum.resize(_m);
   sum.setZeros();
   SpVector<T,I> tmp;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,tmp);
      sum.add(tmp);
   }
};
/// aat <- A*A'.  Each OpenMP thread accumulates into its own K x K
/// scratch buffer (lower triangle only); buffers are then summed and the
/// result mirrored with fillSymmetric().
template <typename T, typename I> inline void SpMatrix<T,I>::AAt(Matrix<T>& aat) const {
   I i,j,k;
   I K=_m;
   I M=_n;
   /* compute alpha alpha^T */
   aat.resize(K,K);
   int NUM_THREADS=init_omp(MAX_THREADS);
   T* aatT=new T[NUM_THREADS*K*K];
   for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
   for (i = 0; i<M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T* write_area=aatT+numT*K*K;
      // k <= j restricts the update to one triangle; the other half is
      // restored by fillSymmetric below
      for (j = _pB[i]; j<_pE[i]; ++j) {
         for (k = _pB[i]; k<=j; ++k) {
            write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k];
         }
      }
   }
   // reduce the per-thread buffers into aat
   cblas_copy<T>(K*K,aatT,1,aat._X,1);
   for (i = 1; i<NUM_THREADS; ++i)
      cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1);
   aat.fillSymmetric();
   delete[](aatT);
}
/// XtX <- A'*A (dense _n x _n Gram matrix); column i of the result is
/// A' times column i of A
template <typename T, typename I>
inline void SpMatrix<T,I>::XtX(Matrix<T>& XtX) const {
   XtX.resize(_n,_n);
   XtX.setZeros();
   SpVector<T,I> col;
   Vector<T> col_out;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);
      XtX.refCol(i,col_out);
      this->multTrans(col,col_out);
   }
};
/// aat <- A(:,indices)*A(:,indices)': same per-thread triangular
/// accumulation as AAt(), restricted to the selected columns
template <typename T, typename I> inline void SpMatrix<T,I>::AAt(Matrix<T>& aat,
      const Vector<I>& indices) const {
   I i,j,k;
   I K=_m;
   I M=indices.n();
   /* compute alpha alpha^T */
   aat.resize(K,K);
   int NUM_THREADS=init_omp(MAX_THREADS);
   T* aatT=new T[NUM_THREADS*K*K];
   for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
   for (i = 0; i<M; ++i) {
      I ii = indices[i];
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T* write_area=aatT+numT*K*K;
      for (j = _pB[ii]; j<_pE[ii]; ++j) {
         for (k = _pB[ii]; k<=j; ++k) {
            write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k];
         }
      }
   }
   // reduce per-thread buffers, then mirror the triangle
   cblas_copy<T>(K*K,aatT,1,aat._X,1);
   for (i = 1; i<NUM_THREADS; ++i)
      cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1);
   aat.fillSymmetric();
   delete[](aatT);
}
/// aat <- sum_i w_i A(:,i)*A(:,i)' (weighted Gram matrix); same
/// per-thread triangular accumulation as AAt(), scaled by w[i]
template <typename T, typename I> inline void SpMatrix<T,I>::wAAt(const Vector<T>& w,
      Matrix<T>& aat) const {
   I i,j,k;
   I K=_m;
   I M=_n;
   /* compute alpha alpha^T */
   aat.resize(K,K);
   int NUM_THREADS=init_omp(MAX_THREADS);
   T* aatT=new T[NUM_THREADS*K*K];
   for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
   for (i = 0; i<M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T* write_area=aatT+numT*K*K;
      for (j = _pB[i]; j<_pE[i]; ++j) {
         for (k = _pB[i]; k<=j; ++k) {
            write_area[_r[j]*K+_r[k]]+=w._X[i]*_v[j]*_v[k];
         }
      }
   }
   cblas_copy<T>(K*K,aatT,1,aat._X,1);
   for (i = 1; i<NUM_THREADS; ++i)
      cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1);
   aat.fillSymmetric();
   delete[](aatT);
}
/// XAt <- X*A': column r of the result accumulates v * X(:,i) for every
/// stored entry A(r,i) = v.
template <typename T, typename I> inline void SpMatrix<T,I>::XAt(const Matrix<T>& X,
      Matrix<T>& XAt) const {
   I j,i;
   I n=X._m;
   I K=_m;
   I M=_n;
   XAt.resize(n,K);
   // Fix: the axpy calls below *accumulate* into XAt, so the destination
   // must be zeroed first -- resize() alone does not clear the buffer.
   // (The removed multi-threaded variant of this loop zeroed its scratch
   // buffers for the same reason.)
   XAt.setZeros();
   /* compute X alpha^T */
   for (i = 0; i<M; ++i) {
      for (j = _pB[i]; j<_pE[i]; ++j) {
         cblas_axpy<T>(n,_v[j],X._X+i*n,1,XAt._X+_r[j]*n,1);
      }
   }
};
/// XAt <- X(:,indices)*A(:,indices)'.  Per-thread scratch buffers are
/// zero-initialized, filled with axpy updates, then reduced into XAt.
template <typename T, typename I> inline void SpMatrix<T,I>::XAt(const Matrix<T>& X,
      Matrix<T>& XAt, const Vector<I>& indices) const {
   I j,i;
   I n=X._m;
   I K=_m;
   I M=indices.n();
   XAt.resize(n,K);
   /* compute X alpha^T */
   int NUM_THREADS=init_omp(MAX_THREADS);
   T* XatT=new T[NUM_THREADS*n*K];
   for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T();
#pragma omp parallel for private(i,j)
   for (i = 0; i<M; ++i) {
      I ii = indices[i];
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T* write_area=XatT+numT*n*K;
      // entry A(r,ii) scatters column i of X into scratch column r
      for (j = _pB[ii]; j<_pE[ii]; ++j) {
         cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1);
      }
   }
   // reduce per-thread buffers into XAt (copy first, then accumulate)
   cblas_copy<T>(n*K,XatT,1,XAt._X,1);
   for (i = 1; i<NUM_THREADS; ++i)
      cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1);
   delete[](XatT);
};
/// XAt <- sum_i w_i X(:,i mod Mx)*A(:,i)'.  A may hold numRepX
/// repetitions of X's columns (M = numRepX*Mx); zero weights are
/// skipped.  Same per-thread scratch-buffer reduction as XAt(indices).
template <typename T, typename I> inline void SpMatrix<T,I>::wXAt(const Vector<T>& w,
      const Matrix<T>& X, Matrix<T>& XAt, const int numThreads) const {
   I j,l,i;
   I n=X._m;
   I K=_m;
   I M=_n;
   I Mx = X._n;
   I numRepX= M/Mx;
   assert(numRepX*Mx == M);
   XAt.resize(n,K);
   /* compute X alpha^T */
   int NUM_THREADS=init_omp(numThreads);
   T* XatT=new T[NUM_THREADS*n*K];
   for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T();
#pragma omp parallel for private(i,j,l)
   for (i = 0; i<Mx; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T * write_area=XatT+numT*n*K;
      for (l = 0; l<numRepX; ++l) {
         I ind=numRepX*i+l;
         if (w._X[ind] != 0)  // skip zero-weight columns entirely
            for (j = _pB[ind]; j<_pE[ind]; ++j) {
               cblas_axpy<T>(n,w._X[ind]*_v[j],X._X+i*n,1,write_area+_r[j]*n,1);
            }
      }
   }
   cblas_copy<T>(n*K,XatT,1,XAt._X,1);
   for (i = 1; i<NUM_THREADS; ++i)
      cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1);
   delete[](XatT);
};
/// Densify into matrix (column-major, zeros filled in)
template<typename T, typename I> inline void SpMatrix<T,I>::toFull(Matrix<T>& matrix) const {
   matrix.resize(_m,_n);
   matrix.setZeros();
   T* dense = matrix._X;
   for (I col = 0; col < _n; ++col) {
      T* column = dense + col*_m;  // base of column `col` in the dense buffer
      for (I j = _pB[col]; j < _pE[col]; ++j)
         column[_r[j]] = _v[j];
   }
};
/// Densify the transpose into matrix (_n x _m, zeros filled in)
template <typename T, typename I> inline void SpMatrix<T,I>::toFullTrans(
      Matrix<T>& matrix) const {
   matrix.resize(_n,_m);
   matrix.setZeros();
   T* dense = matrix._X;
   for (I col = 0; col < _n; ++col)
      for (I j = _pB[col]; j < _pE[col]; ++j)
         dense[col + _r[j]*_n] = _v[j];  // (row,col) of A lands at (col,row)
};
/// Build a K x M CSC matrix from per-column (value, row) lists stored in
/// the dense matrices vM/rM (L rows per column); a row index of -1 ends
/// a column's list.  Row indices within each column are sorted at the end.
template <typename T, typename I> inline void SpMatrix<T,I>::convert(const Matrix<T>&vM,
      const Matrix<I>& rM, const I K) {
   const I M = rM.n();
   const I L = rM.m();
   const I* r = rM.X();
   const T* v = vM.X();
   I count=0;
   // first pass: count valid entries to size the allocation exactly
   for (I i = 0; i<M*L; ++i) if (r[i] != -1) ++count;
   resize(K,M,count);
   count=0;
   for (I i = 0; i<M; ++i) {
      _pB[i]=count;
      for (I j = 0; j<L; ++j) {
         if (r[i*L+j] == -1) break;  // -1 terminates column i's list
         _v[count]=v[i*L+j];
         _r[count++]=r[i*L+j];
      }
      _pE[i]=count;
   }
   // sort each column's (row, value) pairs by row index
   for (I i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1);
};
/// Like convert(), but every column shares the same row-index list rv
/// (first LL valid entries, -1-terminated).
/// NOTE(review): LL is derived from rv alone and applied to all M
/// columns -- this assumes all columns have the same support; confirm.
template <typename T, typename I> inline void SpMatrix<T,I>::convert2(
      const Matrix<T>&vM, const Vector<I>& rv, const I K) {
   const I M = vM.n();
   const I L = vM.m();
   I* r = rv.rawX();
   const T* v = vM.X();
   I LL=0;
   for (I i = 0; i<L; ++i) if (r[i] != -1) ++LL;
   this->resize(K,M,LL*M);
   I count=0;
   for (I i = 0; i<M; ++i) {
      _pB[i]=count;
      for (I j = 0; j<LL; ++j) {
         _v[count]=v[i*L+j];
         _r[count++]=r[j];
      }
      _pE[i]=count;
   }
   // sort each column's (row, value) pairs by row index
   for (I i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1);
};
/// Rescale each column by the inverse of its squared l2 norm; columns
/// with squared norm below 1e-10 are left untouched.
/// NOTE(review): dividing by nrm2sq() (not nrm2()) means columns do NOT
/// end up with unit l2 norm -- confirm whether that is intentional.
template <typename T, typename I>
inline void SpMatrix<T,I>::normalize() {
   SpVector<T,I> col;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);
      const T norm = col.nrm2sq();
      if (norm > 1e-10)
         col.scal(T(1.0)/norm);  // reuse `norm` instead of recomputing nrm2sq()
   }
};
/// Scale each row to unit l2 norm; rows with norm below 1e-10 are left
/// unchanged (their scale factor is forced to 1).
template <typename T, typename I>
inline void SpMatrix<T,I>::normalize_rows() {
   Vector<T> norms(_m);
   norms.setZeros();
   // accumulate the squared l2 norm of every row
   for (I i = 0; i<_n; ++i) {
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         norms[_r[j]] += _v[j]*_v[j];
      }
   }
   norms.Sqrt();
   // turn norms into per-row scale factors (1 for near-zero rows)
   for (I i = 0; i<_m; ++i)
      norms[i] = norms[i] < 1e-10 ? T(1.0) : T(1.0)/norms[i];
   for (I i = 0; i<_n; ++i)
      for (I j = _pB[i]; j<_pE[i]; ++j)
         _v[j] *= norms[_r[j]];
};
/// norms[i] <- squared l2 norm of column i
template <typename T, typename I>
inline void SpMatrix<T,I>::norm_2sq_cols(Vector<T>& norms) const {
   norms.resize(_n);
   SpVector<T,I> col;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);
      norms[i] = col.nrm2sq();
   }
};
/// norms[i] <- number of stored entries in column i (l0 pseudo-norm)
template <typename T, typename I>
inline void SpMatrix<T,I>::norm_0_cols(Vector<T>& norms) const {
   norms.resize(_n);
   SpVector<T,I> col;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);
      norms[i] = static_cast<T>(col.length());
   }
};
/// norms[i] <- l1 norm of column i
template <typename T, typename I>
inline void SpMatrix<T,I>::norm_1_cols(Vector<T>& norms) const {
   norms.resize(_n);
   SpVector<T,I> col;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,col);
      norms[i] =col.asum();
   }
};
/* ***************************
 * Implementation of SpVector
 * ***************************/
/// Constructor over existing data (non-owning view of v/r)
template <typename T, typename I> SpVector<T,I>::SpVector(T* v, I* r, I L, I nzmax) :
   _externAlloc(true), _v(v), _r(r), _L(L), _nzmax(nzmax) { };
/// Constructor, allocates nzmax slots (length starts at 0)
template <typename T, typename I> SpVector<T,I>::SpVector(I nzmax) :
   _externAlloc(false), _L(0), _nzmax(nzmax) {
#pragma omp critical
      {
         _v = new T[nzmax];
         _r = new I[nzmax];
      }
   };
/// Empty constructor: references nothing, owns nothing
template <typename T, typename I> SpVector<T,I>::SpVector() : _externAlloc(true), _v(NULL), _r(NULL), _L(0),
   _nzmax(0) { };
/// Destructor: releases owned buffers via clear()
template <typename T, typename I> SpVector<T,I>::~SpVector() { clear(); };
/// computes the sum of the magnitudes of the elements (l1 norm)
template <typename T, typename I> inline T SpVector<T,I>::asum() const {
   return cblas_asum<T>(_L,_v,1);
};
/// computes the l2 norm ^2 of the vector
template <typename T, typename I> inline T SpVector<T,I>::nrm2sq() const {
   return cblas_dot<T>(_L,_v,1,_v,1);
};
/// computes the l2 norm of the vector
template <typename T, typename I> inline T SpVector<T,I>::nrm2() const {
   return cblas_nrm2<T>(_L,_v,1);
};
/// returns the maximum magnitude among the stored values
template <typename T, typename I> inline T SpVector<T,I>::fmaxval() const {
   Vector<T> tmp(_v,_L);
   return tmp.fmaxval();
};
/// print the vector to std::cerr: name and capacity, then one
/// "(index, value)" line per stored entry
template <typename T, typename I> inline void SpVector<T,I>::print(const string& name) const {
   std::cerr << name << std::endl << _nzmax << std::endl;
   for (I k = 0; k < _L; ++k)
      cerr << "(" << _r[k] << ", " << _v[k] << ")" << endl;
};
/// indices becomes a non-owning view of the index array _r
template <typename T, typename I> inline void SpVector<T,I>::refIndices(
      Vector<I>& indices) const {
   indices.setPointer(_r,_L);
};
/// copy the stored indices into `indices` (assumed large enough; only
/// its logical length is adjusted via setn)
template <typename T, typename I> inline void SpVector<T,I>::getIndices(Vector<int>& indices) const {
//   indices.resize(_L);
   indices.setn(_L);
   for (int ii=0; ii<_L; ++ii)
      indices[ii]=_r[ii];
};
/// val becomes a non-owning view of the value array _v
template <typename T, typename I> inline void SpVector<T,I>::refVal(
      Vector<T>& val) const {
   val.setPointer(_v,_L);
};
/// a <- a.^2 (element-wise square of the stored values, in place)
template <typename T, typename I> inline void SpVector<T,I>::sqr() {
   vSqr<T>(_L,_v,_v);
};
/// scale the stored values by a
template <typename T, typename I>
inline void SpVector<T,I>::scal(const T a) {
   cblas_scal<T>(_L,a,_v,1);
};
template <typename T, typename I>
inline T SpVector<T,I>::dot(const SpVector<T,I>& vec) const {
   /// Inner product of two sparse vectors: sorted-merge walk over the
   /// two index lists (indices assumed sorted increasingly -- TODO
   /// confirm, e.g. via SpMatrix::convert which sorts its columns).
   T sum = T();
   I ia = 0;
   I ib = 0;
   while (ia < _L && ib < vec._L) {
      const I ra = _r[ia];
      const I rb = vec._r[ib];
      if (ra == rb) {
         sum += _v[ia] * vec._v[ib];
         ++ia;
         ++ib;
      } else if (ra < rb) {
         ++ia;
      } else {
         ++ib;
      }
   }
   return sum;
};
/// Inner product with a dense vector: gather vec at the stored indices
template <typename T, typename I>
inline T SpVector<T,I>::dot(const Vector<T>& vec) const {
   //return cblas_doti(_L,_v,_r,vec.rawX());
   T sum=T();
   for (int countI=0; countI < _L; ++countI)
      sum+=_v[countI]*vec[_r[countI]];
   return sum;
};
/// clears the vector: free owned buffers (never external ones) and
/// reset to the empty, non-owning state
template <typename T, typename I> inline void SpVector<T,I>::clear() {
   if (!_externAlloc) {
      delete[](_v);
      delete[](_r);
   }
   _v=NULL;
   _r=NULL;
   _L=0;
   _nzmax=0;
   _externAlloc=true;
};
/// resizes the vector's capacity; no-op when nzmax already matches.
/// The data is NOT preserved and the logical length _L is reset to 0.
template <typename T, typename I> inline void SpVector<T,I>::resize(const I nzmax) {
   if (_nzmax != nzmax) {
      clear();
      _nzmax=nzmax;
      _L=0;
      _externAlloc=false;
#pragma omp critical
      {
         _v=new T[nzmax];
         _r=new I[nzmax];
      }
   }
};
/// Reinterpret this sparse vector (indices into a column-major m x n
/// matrix) as an m x n CSC sparse matrix.  Assumes the indices _r are
/// sorted increasingly so entries appear column by column.
template <typename T, typename I> void inline SpVector<T,I>::toSpMatrix(
      SpMatrix<T,I>& out, const I m, const I n) const {
   out.resize(m,n,_L);
   cblas_copy<T>(_L,_v,1,out._v,1);
   I current_col=0;
   I* out_r=out._r;
   I* out_pB=out._pB;
   out_pB[0]=current_col;
   for (I i = 0; i<_L; ++i) {
      I col=_r[i]/m;
      if (col > current_col) {
         // close the current column, then re-process entry i against the
         // next column (hence the i--); handles runs of empty columns
         out_pB[current_col+1]=i;
         current_col++;
         i--;
      } else {
         out_r[i]=_r[i]-col*m;  // linear index -> row within column
      }
   }
   // terminate the remaining column pointers at _L
   for (current_col++ ; current_col < n+1; ++current_col)
      out_pB[current_col]=_L;
};
/// Scatter the stored entries into the dense vector out.
/// NOTE(review): out is only zeroed, not resized -- assumes out is
/// already sized to the full dimension; confirm at call sites.
template <typename T, typename I> void inline SpVector<T,I>::toFull(Vector<T>& out)
   const {
   out.setZeros();
   T* dense = out.rawX();
   for (I k = 0; k < _L; ++k)
      dense[_r[k]] = _v[k];
};
#endif
|
vertex_miner.h | #ifndef VERTEX_MINER_H
#define VERTEX_MINER_H
#include "miner.h"
typedef std::unordered_map<BaseEmbedding, Frequency> SimpleMap;
typedef QuickPattern<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrQPattern; // structural quick pattern
typedef CanonicalGraph<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrCPattern; // structural canonical pattern
typedef std::unordered_map<StrQPattern, Frequency> StrQpMapFreq; // mapping structural quick pattern to its frequency
typedef std::unordered_map<StrCPattern, Frequency> StrCgMapFreq; // mapping structural canonical pattern to its frequency
typedef PerThreadStorage<StrQpMapFreq> LocalStrQpMapFreq;
typedef PerThreadStorage<StrCgMapFreq> LocalStrCgMapFreq;
class VertexMiner : public Miner {
public:
// Construct a miner over graph g that grows embeddings of up to `size`
// vertices using `nthreads` OpenMP threads; degree_counting() populates
// the degree information the base Miner relies on.
VertexMiner(Graph *g, unsigned size = 3, int nthreads = 1) {
   graph = g;
   max_size = size;
   degree_counting();
   numThreads = nthreads;
}
virtual ~VertexMiner() {}
// extension for vertex-induced motif: grow every embedding in emb_list by
// one vertex.  Pass 1 counts, in parallel, the canonical extensions of
// each embedding; a prefix sum turns the counts into write offsets; pass
// 2 materializes the new level at those offsets.
inline void extend_vertex(unsigned level, EmbeddingList& emb_list) {
   UintList num_new_emb(emb_list.size());
#pragma omp parallel for
   for (size_t pos = 0; pos < emb_list.size(); pos ++) {
      VertexEmbedding emb(level+1);
      get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
      num_new_emb[pos] = 0;
      unsigned n = emb.size();
      for (unsigned i = 0; i < n; ++i) {
         VertexId src = emb.get_vertex(i);
         //for (auto e : graph->edges(src)) {
         IndexT row_begin = graph->edge_begin(src);
         IndexT row_end = graph->edge_end(src);
         for (IndexT e = row_begin; e < row_end; e++) {
            IndexT dst = graph->getEdgeDst(e);
            // count only canonical extensions (automorphic duplicates skipped)
            if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
               num_new_emb[pos] ++;
            }
         }
      }
   }
   UintList indices = parallel_prefix_sum<unsigned>(num_new_emb);
   num_new_emb.clear();
   auto new_size = indices.back();
   assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32
   std::cout << "number of new embeddings: " << new_size << "\n";
   emb_list.add_level(new_size);
#ifdef USE_WEDGE
   if (level == 1 && max_size == 4) {
      is_wedge.resize(emb_list.size());
      std::fill(is_wedge.begin(), is_wedge.end(), 0);
   }
#endif
   // pass 2: write the extensions at the precomputed offsets.
   // NOTE(review): unlike the clique variant, this loop is serial --
   // confirm whether the missing `#pragma omp parallel for` is intentional.
   for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
      VertexEmbedding emb(level+1);
      get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
      auto start = indices[pos];
      auto n = emb.size();
      for (unsigned i = 0; i < n; ++i) {
         VertexId src = emb.get_vertex(i);
         //for (auto e : graph->edges(src)) {
         IndexT row_begin = graph->edge_begin(src);
         IndexT row_end = graph->edge_end(src);
         for (IndexT e = row_begin; e < row_end; e++) {
            IndexT dst = graph->getEdgeDst(e);
            if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
               assert(start < indices.back());
               // at the 3-vertex stage of 4-motif mining, record the
               // pattern id of the extended embedding
               if (n == 2 && max_size == 4)
                  emb_list.set_pid(start, find_motif_pattern_id(n, i, dst, emb, start));
               emb_list.set_idx(level+1, start, pos);
               emb_list.set_vid(level+1, start++, dst);
            }
         }
      }
   }
   indices.clear();
}
// extension for vertex-induced clique: a candidate dst extends a clique
// only if it is connected (in the DAG orientation) to every vertex of the
// current embedding.  Same count / prefix-sum / fill structure as the
// motif variant; completed cliques at the last level are tallied into num
// instead of being materialized.
inline void extend_vertex(unsigned level, EmbeddingList& emb_list, Accumulator<AccType> &num) {
   UintList num_new_emb(emb_list.size());
#pragma omp parallel for
   for (size_t pos = 0; pos < emb_list.size(); pos ++) {
      BaseEmbedding emb(level+1);
      get_embedding<BaseEmbedding>(level, pos, emb_list, emb);
      VertexId vid = emb_list.get_vid(level, pos);
      num_new_emb[pos] = 0;
      //std::cout << "current embeddings: " << emb << "\n";
      //for (auto e : graph->edges(vid)) {
      IndexT row_begin = graph->edge_begin(vid);
      IndexT row_end = graph->edge_end(vid);
      for (IndexT e = row_begin; e < row_end; e++) {
         IndexT dst = graph->getEdgeDst(e);
         //printf("Examine edge(%d,%d)\n", vid, dst);
         if (is_all_connected_dag(dst, emb, level)) {
            //std::cout << "clique found\n";
            if (level < max_size-2) num_new_emb[pos] ++;
            else num += 1;  // final level: count, don't materialize
         }
      }
   }
   //std::cout << "number of cliques: " << num.reduce() << "\n";
   if (level == max_size-2) return;  // all cliques counted above
   UintList indices = parallel_prefix_sum<unsigned>(num_new_emb);
   num_new_emb.clear();
   auto new_size = indices.back();
   assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32
   std::cout << "number of new embeddings: " << new_size << "\n";
   emb_list.add_level(new_size);
#pragma omp parallel for
   for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
      BaseEmbedding emb(level+1);
      get_embedding<BaseEmbedding>(level, pos, emb_list, emb);
      VertexId vid = emb_list.get_vid(level, pos);
      unsigned start = indices[pos];
      //for (auto e : graph->edges(vid)) {
      IndexT row_begin = graph->edge_begin(vid);
      IndexT row_end = graph->edge_end(vid);
      for (IndexT e = row_begin; e < row_end; e++) {
         IndexT dst = graph->getEdgeDst(e);
         if (is_all_connected_dag(dst, emb, level)) {
            emb_list.set_idx(level+1, start, pos);
            emb_list.set_vid(level+1, start++, dst);
         }
      }
   }
   indices.clear();
}
// Counts motif patterns: for every embedding at 'level', tries each
// non-automorphic one-vertex extension, classifies it with
// find_motif_pattern_id, and bumps the matching accumulator.
// Only valid up to 4-motifs (asserts n < 4).
inline void aggregate(unsigned level, EmbeddingList& emb_list, std::vector<UlongAccu> &accumulators) {
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
unsigned n = emb.size();
// 3-vertex embeddings carry the pattern id of their parent (triangle/chain)
if (n == 3) emb.set_pid(emb_list.get_pid(pos));
// try extending from every vertex of the embedding
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
//for (auto e : graph->edges(src)) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
assert(n < 4);
unsigned pid = find_motif_pattern_id(n, i, dst, emb, pos);
assert(pid < accumulators.size());
//printf("pid = %u\n", pid);
// UlongAccu is presumably a reduction-safe accumulator — confirm
accumulators[pid] += 1;
}
}
}
emb.clean();
}
}
// Quick-pattern aggregation for motifs beyond size 4: builds the
// connectivity-based quick pattern (StrQPattern) of every non-automorphic
// extension and counts its frequency in a per-thread map (qp_localmaps),
// merged later by merge_qp_map().
inline void quick_aggregate(unsigned level, const EmbeddingList& emb_list) {
// reset each thread's local frequency map
for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear();
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
int tid = omp_get_thread_num();
StrQpMapFreq* qp_map = qp_localmaps.getLocal(tid);
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
unsigned n = emb.size();
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
//for (auto e : graph->edges(src)) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
// connectivity of dst to each embedding vertex defines the quick pattern
std::vector<bool> connected;
get_connectivity(n, i, dst, emb, connected);
StrQPattern qp(n+1, connected);
if (qp_map->find(qp) != qp_map->end()) {
(*qp_map)[qp] += 1;
qp.clean(); // pattern already a key; release this duplicate
} else (*qp_map)[qp] = 1;
}
}
}
emb.clean();
}
}
// canonical pattern aggregation
inline void canonical_aggregate() {
for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear();
//auto it = qp_map.begin();
//#pragma omp parallel for
/*
for (auto i = 0; i < qp_map.size(); i++) {
StrCgMapFreq* cg_map = cg_localmaps.getLocal(i);
StrCPattern cg(it->first);
if (cg_map->find(cg) != cg_map->end()) (*cg_map)[cg] += it->second;
else (*cg_map)[cg] = it->second;
cg.clean();
}
qp_map.clear();
*/
}
inline void merge_qp_map() {
qp_map.clear();
for (unsigned i = 0; i < qp_localmaps.size(); i++) {
StrQpMapFreq qp_lmap = *qp_localmaps.getLocal(i);
for (auto element : qp_lmap) {
if (qp_map.find(element.first) != qp_map.end())
qp_map[element.first] += element.second;
else qp_map[element.first] = element.second;
}
}
}
inline void merge_cg_map() {
cg_map.clear();
for (unsigned i = 0; i < cg_localmaps.size(); i++) {
StrCgMapFreq cg_lmap = *cg_localmaps.getLocal(i);
for (auto element : cg_lmap) {
if (cg_map.find(element.first) != cg_map.end())
cg_map[element.first] += element.second;
else cg_map[element.first] = element.second;
}
}
}
// Utilities
void printout_motifs(std::vector<UlongAccu> &accumulators) {
std::cout << std::endl;
if (accumulators.size() == 2) {
std::cout << "\ttriangles\t" << accumulators[0].reduce() << std::endl;
std::cout << "\t3-chains\t" << accumulators[1].reduce() << std::endl;
} else if (accumulators.size() == 6) {
std::cout << "\t4-paths --> " << accumulators[0].reduce() << std::endl;
std::cout << "\t3-stars --> " << accumulators[1].reduce() << std::endl;
std::cout << "\t4-cycles --> " << accumulators[2].reduce() << std::endl;
std::cout << "\ttailed-triangles --> " << accumulators[3].reduce() << std::endl;
std::cout << "\tdiamonds --> " << accumulators[4].reduce() << std::endl;
std::cout << "\t4-cliques --> " << accumulators[5].reduce() << std::endl;
} else {
std::cout << "\ttoo many patterns to show\n";
}
std::cout << std::endl;
}
// Prints the 21 5-motif pattern frequencies as "{id} --> count" lines.
void printout_motifs(UintMap &p_map) {
assert(p_map.size() == 21);
std::cout << std::endl;
for (const auto &entry : p_map)
std::cout << "{" << entry.first << "} --> " << entry.second << std::endl;
std::cout << std::endl;
}
// Prints the merged canonical-graph frequency map, one pattern per line.
void printout_motifs() {
std::cout << std::endl;
for (const auto &entry : cg_map)
std::cout << entry.first << " --> " << entry.second << std::endl;
std::cout << std::endl;
}
private:
//unsigned num_cliques;
unsigned max_size;
int numThreads;
std::vector<unsigned> is_wedge; // indicates whether a 3-vertex embedding is a wedge or a chain (v0-centered or v1-centered)
StrQpMapFreq qp_map; // quick patterns map for counting the frequency
StrCgMapFreq cg_map; // canonical graph map for counting the frequency
LocalStrQpMapFreq qp_localmaps; // quick patterns local map for each thread
LocalStrCgMapFreq cg_localmaps; // canonical graph local map for each thread
// Reconstructs the full embedding stored implicitly in emb_list at
// (level, pos): each list entry stores its vertex plus the index of its
// parent at the previous level, so the embedding is rebuilt by walking the
// parent chain backwards from 'level' down to 0.
template <typename EmbeddingTy>
inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EmbeddingTy &emb) {
VertexId vid = emb_list.get_vid(level, pos);
IndexTy idx = emb_list.get_idx(level, pos);
ElementType ele(vid);
emb.set_element(level, ele); // last vertex first
// backward constructing the embedding
for (unsigned l = 1; l < level; l ++) {
VertexId u = emb_list.get_vid(level-l, idx);
ElementType ele(u); // intentionally shadows the outer 'ele'
emb.set_element(level-l, ele);
idx = emb_list.get_idx(level-l, idx); // follow the parent chain
}
// at level 0 the remaining index IS the first vertex id
ElementType ele0(idx);
emb.set_element(0, ele0);
}
// Canonical-order pruning for vertex-induced extension: returns true when
// extending 'emb' from position 'idx' with vertex 'dst' would regenerate an
// embedding already produced in another order (an automorphic duplicate).
inline bool is_vertexInduced_automorphism(const VertexEmbedding& emb, unsigned idx, VertexId src, VertexId dst) {
const unsigned n = emb.size();
// the new vertex id must exceed the first vertex id
if (dst <= emb.get_vertex(0)) return true;
// the new vertex must not already be part of the embedding
for (unsigned k = 1; k < n; ++k) {
if (emb.get_vertex(k) == dst) return true;
}
// the new vertex must not be reachable from any earlier source vertex
for (unsigned k = 0; k < idx; ++k) {
if (is_connected(emb.get_vertex(k), dst)) return true;
}
// vertex ids after the source must stay below the new vertex id
for (unsigned k = idx + 1; k < n; ++k) {
if (emb.get_vertex(k) > dst) return true;
}
return false;
}
// Classifies the motif obtained by extending an n-vertex embedding 'emb' from
// position 'idx' with vertex 'dst'. For n==2/n==3 it returns a small dense
// pattern id (see inline comments); for larger n it hashes the characteristic
// polynomial of the extended adjacency matrix. 'pos' indexes the is_wedge
// cache when USE_WEDGE is defined.
inline unsigned find_motif_pattern_id(unsigned n, unsigned idx, VertexId dst, const VertexEmbedding& emb, unsigned pos = 0) {
unsigned pid = 0;
if (n == 2) { // count 3-motifs
pid = 1; // 3-chain
if (idx == 0) {
if (is_connected(emb.get_vertex(1), dst)) pid = 0; // triangle
#ifdef USE_WEDGE
else if (max_size == 4) is_wedge[pos] = 1; // wedge; used for 4-motif
#endif
}
} else if (n == 3) { // count 4-motifs
unsigned num_edges = 1; // dst-src edge always exists
pid = emb.get_pid(); // parent pattern: 0 = triangle, 1 = 3-chain
if (pid == 0) { // extending a triangle
for (unsigned j = idx+1; j < n; j ++)
if (is_connected(emb.get_vertex(j), dst)) num_edges ++;
pid = num_edges + 2; // p3: tailed-triangle; p4: diamond; p5: 4-clique
} else { // extending a 3-chain
assert(pid == 1);
// record which embedding vertices dst attaches to
std::vector<bool> connected(3, false);
connected[idx] = true;
for (unsigned j = idx+1; j < n; j ++) {
if (is_connected(emb.get_vertex(j), dst)) {
num_edges ++;
connected[j] = true;
}
}
if (num_edges == 1) {
pid = 0; // p0: 3-path
// the chain's center vertex decides path vs star
unsigned center = 1;
#ifdef USE_WEDGE
if (is_wedge[pos]) center = 0; // cached from the n==2 step
#else
center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0;
#endif
if (idx == center) pid = 1; // p1: 3-star
} else if (num_edges == 2) {
pid = 2; // p2: 4-cycle
unsigned center = 1;
#ifdef USE_WEDGE
if (is_wedge[pos]) center = 0;
#else
center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0;
#endif
if (connected[center]) pid = 3; // p3: tailed-triangle
} else {
pid = 4; // p4: diamond
}
}
} else { // count 5-motif and beyond
// hash of the characteristic polynomial of the (n+1)-vertex adjacency
// matrix serves as the pattern id (collisions presumably rare — confirm)
std::vector<bool> connected;
get_connectivity(n, idx, dst, emb, connected);
Matrix A(n+1, std::vector<MatType>(n+1, 0));
gen_adj_matrix(n+1, connected, A);
std::vector<MatType> c(n+1, 0);
char_polynomial(n+1, A, c);
bliss::UintSeqHash h;
for (unsigned i = 0; i < n+1; ++i)
h.update((unsigned)c[i]);
pid = h.get_value();
}
return pid;
}
};
#endif // VERTEX_MINER_HPP_
|
GB_binop__isge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint8)
// A*D function (colscale): GB (_AxD__isge_uint8)
// D*A function (rowscale): GB (_DxB__isge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint8)
// C=scalar+B GB (_bind1st__isge_uint8)
// C=scalar+B' GB (_bind1st_tran__isge_uint8)
// C=A+scalar GB (_bind2nd__isge_uint8)
// C=A'+scalar GB (_bind2nd_tran__isge_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): the loop lives in
// GB_dense_ewise3_noaccum_template.c, specialized here for z = (x >= y) on
// uint8_t. Returns GrB_NO_VALUE when ISGE_UINT8 is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): body is GB_dense_subassign_23_template.c
// specialized for the ISGE operator on uint8_t. B is pre-sliced into
// B_ntasks tasks for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): unpacks the scalar b from p_bwork and
// runs GB_dense_subassign_22_template.c.
GrB_Info GB (_Cdense_accumb__isge_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the return inside the braces above always
// fires first; harmless generator artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): column-scale via
// GB_AxB_colscale_template.c, specialized for z = (x >= y) on uint8_t.
GrB_Info GB (_AxD__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): row-scale via
// GB_AxB_rowscale_template.c, specialized for z = (x >= y) on uint8_t.
GrB_Info GB (_DxB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): element-wise add (set union) via
// GB_add_template.c. Declares werk slicing workspaces for M, A, and B which
// the template may allocate; GB_FREE_WORK releases them on exit.
GrB_Info GB (_AaddB__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): element-wise multiply (set
// intersection), method 08, via GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit). GB_BINOP_FLIP is 0 for ISGE (see the
// macro above), so only the unflipped #else branch is compiled here; ISGE's
// flip is handled elsewhere by swapping to ISLE.
GrB_Info GB (_AemultB_02__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): method 04 via GB_emult_04_template.c;
// M is pre-sliced into M_ntasks tasks for M_nthreads threads.
GrB_Info GB (_AemultB_04__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated wrapper (do not edit): bitmap-result element-wise multiply
// via GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__isge_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated (do not edit): Cx [p] = (x >= Bx [p]) for every entry present
// in B's bitmap Bb (GBB is true for full matrices). Parallel over the bnz
// entries; safe because each p is written exactly once.
GrB_Info GB (_bind1st__isge_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Auto-generated (do not edit): Cx [p] = (Ax [p] >= y) for every entry present
// in A's bitmap Ab. Mirror of bind1st with the scalar on the right.
GrB_Info GB (_bind2nd__isge_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// Auto-generated (do not edit): transposes A while applying z = (x >= aij)
// via GB_unop_transpose.c. GB_ATYPE is temporarily redefined because the
// template reads it for A, which is the second operand here; it is restored
// after the include.
GrB_Info GB (_bind1st_tran__isge_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// Auto-generated (do not edit): transposes A while applying z = (aij >= y)
// via GB_unop_transpose.c; the scalar y binds the second operand.
GrB_Info GB (_bind2nd_tran__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opencl_odf_fmt_plug.c | /* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF Blowfish format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_odf);
#else
#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "common.h"
#include "formats.h"
#include "common-opencl.h"
#define FORMAT_LABEL "ODF-opencl"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(odf_cpu_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
typedef struct {
uint32_t length;
uint8_t v[20]; // hash of password
} odf_password;
typedef struct {
uint32_t v[32/4];
} odf_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint8_t length;
uint8_t salt[64];
} odf_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} odf_cpu_salt;
static odf_cpu_salt *cur_salt;
static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d7224475
7d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"},
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf73099
8513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"},
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54ab
dd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"},
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f
49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"},
{NULL}
};
static cl_int cl_error;
static odf_password *inbuffer;
static odf_hash *outbuffer;
static odf_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Query the autotune helper for the largest usable local work size. */
static size_t get_task_max_work_group_size()
{
	size_t max_lws;

	max_lws = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	return max_lws;
}
/*
 * Allocate host buffers and device buffers sized for 'gws' candidates per
 * crypt_all() call, then bind the device buffers to the kernel arguments.
 * Called (possibly repeatedly) by the autotuner; release_clobj() is the
 * matching destructor.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(odf_password) * gws;
	outsize = sizeof(odf_hash) * gws;
	settingsize = sizeof(odf_salt);
	cracked_size = sizeof(*crypt_out) * gws;

	/* Host-side staging buffers (zeroed where partially-filled data is sent). */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	saved_key = mem_calloc(gws, sizeof(*saved_key));
	crypt_out = mem_calloc(1, cracked_size);

	/* Device-side buffers: input keys, salt settings, derived-key output. */
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* Kernel argument order must match the derive_key kernel signature. */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
	               &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
	               &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
	               &mem_setting), "Error while setting mem_salt kernel argument");
}
/*
 * Release everything create_clobj() allocated.  Guarded by crypt_out so a
 * double call (autotune teardown followed by done()) is harmless; MEM_FREE
 * is assumed to NULL the pointer, making the guard effective — verify
 * against the project's memory.h.
 */
static void release_clobj(void)
{
	if (crypt_out) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(saved_key);
		MEM_FREE(crypt_out);
	}
}
/* Format teardown: undo reset()'s kernel/program creation, once. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* One-time format init: remember the fmt_main handle and prepare the device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * Per-database (re)initialization: build the PBKDF2-HMAC-SHA1 kernel with
 * buffer sizes baked in as -D defines, then run the shared autotuner to pick
 * global/local work sizes.  Only done once (guarded by 'autotuned').
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		/* Sizes must match the struct layouts shared with the kernel. */
		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         (int)sizeof(inbuffer->v),
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);
		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
		                       self, create_clobj, release_clobj,
		                       sizeof(odf_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/*
 * Syntax check for an "$odf$*..." ciphertext.
 * Format: $odf$*cipher*cksum_type*iter*keysize*checksum*ivlen*iv*saltlen*salt*?*content
 * Returns 1 when every field parses and is in range, else 0.
 *
 * Fixes: the iteration count was previously accepted unchecked (0, negative
 * or non-numeric all passed), and the iv/salt length fields accepted
 * negative atoi() results.  All three now get explicit range checks.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;

	if (strncmp(ciphertext, "$odf$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 6;
	if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0) {          /* only Blowfish (type 0) handled here */
		goto err;
	}
	if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
		goto err;
	if (atoi(p) < 1)         /* PBKDF2 iteration count must be positive */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
		goto err;
	//if (hexlenl(p) != res) // Hmm. res==16, length of p == 40??? Not sure about this one.
	//	goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
		goto err;
	res = atoi(p);
	if (res < 0 || res > 16) /* odf_cpu_salt.iv holds at most 16 bytes */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iv */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
		goto err;
	res = atoi(p);
	if (res < 0 || res > 32) /* odf_cpu_salt.salt holds at most 32 bytes */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* something */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* content */
		goto err;
	res = strlen(p);
	if (res > 2048 || res & 1) /* must fit cs.content and be whole bytes */
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a (already valid()-checked) ciphertext into the static odf_cpu_salt.
 * No NULL checks on strtokm(): valid() guarantees every field is present.
 * Returns a pointer to static storage, overwritten on the next call (the
 * standard JtR get_salt contract).
 *
 * Fix: the content-decoding loop tested p[i*2] BEFORE the i < 1024 bound,
 * so the bound never prevented the dereference; the order is now bound
 * first, matching cs.content's 1024-byte capacity.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static odf_cpu_salt cs;

	ctcopy += 6; /* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	p = strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	memset(cs.content, 0, sizeof(cs.content));
	/* Bound check first so p[i * 2] is never read past the buffer. */
	for (i = 0; i < 1024 && p[i * 2]; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	cs.content_length = i;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Decode the hex checksum field (field 5 of the ciphertext) into a static,
 * word-aligned binary buffer of BINARY_SIZE bytes.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += 6; /* skip over "$odf$*" */

	/* Advance to the fifth '*'-separated field (the checksum). */
	p = strtokm(ctcopy, "*");
	for (i = 0; i < 4; i++)
		p = strtokm(NULL, "*");

	for (i = 0; i < BINARY_SIZE; i++)
		out[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
		         atoi16[ARCH_INDEX(p[2 * i + 1])];

	MEM_FREE(keeptr);
	return out;
}
/*
 * Stage the current salt (raw salt bytes, iteration count and derived-key
 * length) into the host-side settings struct and copy it to the device.
 *
 * Fix: "&currentsalt" in the clEnqueueWriteBuffer call had been mangled
 * into the HTML entity sequence "¤tsalt" (encoding corruption), which
 * does not compile; the address-of expression is restored.
 */
static void set_salt(void *salt)
{
	cur_salt = (odf_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
	               CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	               "Copy salt to gpu");
}
/* Partial-hash accessors: successively wider low-bit masks of the first
 * 32-bit word of each computed result, used by the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#undef set_key
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the candidate stored by set_key() for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Hash 'count' candidates against the current salt.
 * Pipeline: (CPU) SHA-1 of each password -> (GPU) PBKDF2-HMAC-SHA1 key
 * derivation via the derive_key kernel -> (CPU) Blowfish-CFB64 decrypt of
 * the stored content with the derived key, then SHA-1 of the plaintext;
 * that digest is what cmp_* compares against the stored checksum.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* ODF keys are derived from SHA-1(password), not the password itself. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		unsigned char hash[20];
		SHA_CTX ctx;
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
		SHA1_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 20);
		inbuffer[index].length = 20;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking read doubles as a sync point)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	/* During autotuning only kernel timing matters; skip the CPU stage. */
	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		BF_KEY bf_key;
		SHA_CTX ctx;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];
		bf_ivec_pos = 0;
		memcpy(ivec, cur_salt->iv, 8);
		BF_set_key(&bf_key, cur_salt->key_size, (unsigned char*)outbuffer[index].v);
		/* 0 == decrypt direction for BF_cfb64_encrypt. */
		BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, output, cur_salt->content_length);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}
/* Quick scan: does any candidate match on the first machine word? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!memcmp(binary, crypt_out[i], ARCH_SIZE))
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one() already compared the full binary. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format registration: parameters block, then the method table wired to the
 * functions above.  Field order is fixed by struct fmt_main. */
struct fmt_main fmt_opencl_odf = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		odf_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
declare_variant_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// expected-no-diagnostics
int foo(void);
#pragma omp declare variant(foo) match(xxx={}, yyy={ccc})
#pragma omp declare variant(foo) match(xxx={vvv})
#pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)}, device={kind(fpga)})
#pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx})
#pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)}, device={kind(cpu, nohost)})
#pragma omp declare variant(foo) match(device={kind(host)})
#pragma omp declare variant(foo) match(device={kind(nohost), xxx})
#pragma omp declare variant(foo) match(implementation={extension(match_all)})
#pragma omp declare variant(foo) match(implementation={extension(match_any)})
#pragma omp declare variant(foo) match(implementation={extension(match_none)})
int bar(void);
// CHECK: int foo();
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_none)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_any)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_all)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(host)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm)}, device={kind(cpu, nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0): llvm)}, device={kind(fpga)})
// CHECK-NEXT: int bar();
|
nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
///@}
///@name Life Cycle
///@{
/** Constructor: forwards the linear solver to the BuilderAndSolver base. */
NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
}

/** Destructor: base class owns all resources, nothing to release here. */
~NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Assembles the nodal continuity (pressure) system in two passes:
 * first a per-node 1x1 compressibility contribution, then the standard
 * elemental contributions from non-SOLID elements.
 * @param pScheme The integration scheme (must be non-null)
 * @param rModelPart The model part providing nodes, elements and DELTA_TIME
 * @param A The LHS matrix, assembled in place
 * @param b The RHS vector, assembled in place
 */
void BuildAll(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &b)
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Per-node contributions to the continuity equation (1x1 local system).
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    Element::EquationIdVectorType EquationId;

    ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = CurrentProcessInfo[DELTA_TIME];
    double deltaPressure = 0;

    // ---- Pass 1: nodal compressibility terms ----
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        const unsigned int neighSize = neighb_nodes.size() + 1;
        // Isolated nodes (no neighbours) carry no continuity contribution.
        if (neighSize > 1)
        {
            if (LHS_Contribution.size1() != 1)
                LHS_Contribution.resize(1, 1, false); // false: do not preserve existing storage
            if (RHS_Contribution.size() != 1)
                RHS_Contribution.resize(1, false); // false: do not preserve existing storage
            LHS_Contribution = ZeroMatrix(1, 1);
            RHS_Contribution = ZeroVector(1);
            if (EquationId.size() != 1)
                EquationId.resize(1, false);

            // Fluid (or fluid-side interface) nodal compressibility term.
            if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
                if (nodalVolume > 0)
                { // in interface nodes not in contact with fluid elements the nodal volume is zero
                    double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
                    double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
                    if (yieldShear > 0)
                    {
                        // Papanastasiou-regularized Bingham viscosity correction.
                        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
                        double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
                        double exponent = -adaptiveExponent * equivalentStrainRate;
                        if (equivalentStrainRate != 0)
                        {
                            deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
                        }
                        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
                        {
                            // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                            deviatoricCoeff = adaptiveExponent * yieldShear;
                        }
                    }
                    // Cap the viscosity for pure-fluid nodes.
                    if (deviatoricCoeff > 0.1 && itNode->IsNot(SOLID))
                    {
                        deviatoricCoeff = 0.1;
                    }
                    double volumetricCoeff = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) + 2.0 * deviatoricCoeff / 3.0;
                    // Fluid/interface nodes use bulk modulus scaled by the time step instead.
                    if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
                    {
                        volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
                    }
                    deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
                    RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
                    RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
                }
            }

            // Solid nodal compressibility term from Young's modulus / Poisson ratio.
            if (itNode->Is(SOLID))
            {
                double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
                double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
                double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
                double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
                double volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
                deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
                LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
                RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
                RHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
            }

            const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
            EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
#ifdef _OPENMP
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
        }
    }

    // ---- Pass 2: elemental contributions (non-SOLID elements only) ----
    ElementsArrayType &pElements = rModelPart.Elements();
    int number_of_threads = OpenMPUtils::GetNumThreads();
#ifdef _OPENMP
    int A_size = A.size1();
    // One lock per matrix row to serialize concurrent assembly.
    std::vector<omp_lock_t> lock_array(A.size1());
    for (int i = 0; i < A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif
    DenseVector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    if (this->GetEchoLevel() > 0)
    {
        KRATOS_WATCH(number_of_threads);
        KRATOS_WATCH(element_partition);
    }
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
    for (int k = 0; k < number_of_threads; k++)
    {
        // Thread-local scratch for the elemental system.
        LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0);
        Element::EquationIdVectorType elementalEquationId;
        // NOTE(review): shadows the outer CurrentProcessInfo — intentional? confirm.
        const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
        unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE);

        // Assemble all elements in this thread's partition.
        for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            if ((*it)->IsNot(SOLID))
            {
                // Calculate the elemental contribution.
                (*it)->CalculateLocalSystem(elementalLHS_Contribution, elementalRHS_Contribution, CurrentProcessInfo);
                Geometry<Node<3>> &geom = (*it)->GetGeometry();
                if (elementalEquationId.size() != geom.size())
                    elementalEquationId.resize(geom.size(), false);
                for (unsigned int i = 0; i < geom.size(); i++)
                    elementalEquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
#ifdef _OPENMP
                this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId, lock_array);
#else
                this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId);
#endif
            }
        }
    }
#ifdef _OPENMP
    for (int i = 0; i < A_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif
    KRATOS_CATCH("")
}
/**
 * @brief Solves A * Dx = b with the configured linear solver.
 * @details If the RHS norm is zero the solve is skipped and Dx is zeroed.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b == 0.00)
    {
        // Trivial RHS: the increment is identically zero.
        TSparseSpace::SetToZero(Dx);
    }
    else
    {
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }

    // Report solver details at high echo levels.
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Solves A * Dx = b, first handing physical data (DofSet, model part)
 * to solvers that request it.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b,
    ModelPart &rModelPart)
{
    KRATOS_TRY

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b == 0.00)
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }
    else
    {
        // Some solvers (e.g. AMG with rigid-body modes) need extra physical data.
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }

    // Report solver details at high echo levels (rank 0 only).
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    Timer::Start("Build");
    BuildAll(pScheme, rModelPart, A, b);
    Timer::Stop("Build");

    // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

    // NOTE(review): log tag says "ResidualBasedBlockBuilderAndSolver" — looks
    // copy-pasted from another builder; confirm whether it should be renamed.
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
                                                                                     << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    Timer::Start("Solve");
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
                                                                                     << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Assembles LHS and RHS from all elemental contributions, in parallel
 * over a static element partition, with one OpenMP lock per matrix row.
 * @param pScheme The integration scheme (must be non-null)
 * @param r_model_part The model part providing the elements
 * @param A The LHS matrix, assembled in place
 * @param b The RHS vector, assembled in place
 */
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart &r_model_part,
    TSystemMatrixType &A,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    if (!pScheme)
        KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");

    // Getting the elements from the model.
    ElementsArrayType &pElements = r_model_part.Elements();

    // Resetting to zero the vector of reactions.
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // Create a partition of the element array, one slice per thread.
    int number_of_threads = OpenMPUtils::GetNumThreads();
#ifdef _OPENMP
    int A_size = A.size1();
    // One lock per matrix row to serialize concurrent assembly.
    std::vector<omp_lock_t> lock_array(A.size1());
    for (int i = 0; i < A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif
    DenseVector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    if (this->GetEchoLevel() > 0)
    {
        KRATOS_WATCH(number_of_threads);
        KRATOS_WATCH(element_partition);
    }
    double start_prod = OpenMPUtils::GetCurrentTime();
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
    for (int k = 0; k < number_of_threads; k++)
    {
        // Thread-local scratch for the elemental system.
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        // Localization of the elemental terms in the global system.
        Element::EquationIdVectorType EquationId;
        const ProcessInfo &CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
        unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE);

        // Assemble all elements in this thread's partition.
        for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            // Calculate the elemental contribution.
            (*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
            Geometry<Node<3>> &geom = (*it)->GetGeometry();
            if (EquationId.size() != geom.size())
                EquationId.resize(geom.size(), false);
            for (unsigned int i = 0; i < geom.size(); i++)
                EquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
#ifdef _OPENMP
            this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
#else
            this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
        }
    }
    if (this->GetEchoLevel() > 0)
    {
        double stop_prod = OpenMPUtils::GetCurrentTime();
        std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
    }
#ifdef _OPENMP
    for (int i = 0; i < A_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif
    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
// one dof set per thread, merged later with a tree reduction
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
// reserve() here is a bucket-count hint to reduce rehashing
dofs_aux_list[i].reserve(nelements);
#endif
}
// NOTE(review): the parallel pragma below is disabled, so this loop is serial
// and ThisThread() is always the master thread -- the per-thread sets beyond
// index 0 stay empty until the reduction. Confirm before re-enabling.
//#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); ++i)
{
auto it_elem = pElements.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// ConditionsArrayType& pConditions = rModelPart.Conditions();
// const int nconditions = static_cast<int>(pConditions.size());
// #pragma omp parallel for firstprivate(nconditions, ElementalDofList)
// for (int i = 0; i < nconditions; i++)
// {
// typename ConditionsArrayType::iterator it = pConditions.begin() + i;
// const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// // gets list of Dof involved on every element
// pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
// dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
// }
//here we do a reduction in a tree so to have everything on thread 0
// (at each pass, set i absorbs set i+new_max; halves the live sets per pass)
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
// copy the merged (unordered) set into the sorted dof container
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(*it);
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an execption if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
// recreate the member lock array so it matches the new dof-set size
// (destroy old locks first to avoid leaking OS lock resources)
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
    ModelPart &rModelPart) override
{
    // Assign equation ids to the dofs:
    //   * free dofs are numbered 0,1,2,... from the front of the system,
    //   * fixed dofs are numbered from the back, in reverse order.
    // Consequently an EquationId >= mEquationSystemSize marks a restrained dof.
    int next_free = 0;
    int next_fixed = BaseType::mDofSet.size();
    for (auto it = BaseType::mDofSet.begin(); it != BaseType::mDofSet.end(); ++it)
    {
        if (it->IsFixed())
        {
            --next_fixed;
            it->SetEquationId(next_fixed);
        }
        else
        {
            it->SetEquationId(next_free);
            ++next_free;
        }
    }
    // After the sweep next_fixed equals the number of free dofs,
    // i.e. the size of the system actually solved.
    BaseType::mEquationSystemSize = next_fixed;
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Allocates (if needed) and resizes the system matrix A and vectors Dx, b to
 * the current equation system size, building the matrix sparsity structure when required.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the system (LHS) matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
/* boost::timer c_contruct_matrix; */
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
// NOTE(review): the two lines below are unreachable -- KRATOS_ERROR throws.
// Kept for reference of the old (slow) recovery path.
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
// (reactions vector covers ALL dofs, not just the free ones)
if (BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
/* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
// Intentionally empty: with the elimination approach the fixed dofs are
// excluded from the assembled system (their equation ids lie beyond
// mEquationSystemSize), so nothing needs to be imposed here.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
// release the dof set by replacing it with a fresh empty container
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
// let the linear solver drop its internal storage as well
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
// no checks are currently implemented for this builder-and-solver
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Adds a local (elemental) LHS/RHS contribution into the global system.
 * @details Only rows belonging to free dofs (EquationId < mEquationSystemSize) are
 * assembled; fixed rows are skipped entirely. Under OpenMP a per-row lock guards the
 * update of b[i_global] and the whole row of A, making concurrent assembly safe.
 * @param A The global LHS matrix
 * @param b The global RHS vector
 * @param LHS_Contribution The local elemental matrix
 * @param RHS_Contribution The local elemental vector
 * @param EquationId Global equation ids for the local rows/columns
 * @param lock_array One omp lock per global row (OpenMP builds only)
 */
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
// serialize all writes targeting this global row
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
// columns of fixed dofs are dropped as well (square reduced system)
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
//**************************************************************************
/**
 * @brief Builds the sparsity pattern of A and allocates it as a CSR matrix filled with zeros.
 * @details Phase 1: each thread collects, per global row, the set of coupled column indices
 * from the elements and conditions it visits (thread-local sets, merged under a critical
 * section). Phase 2: the merged sets are converted into ublas compressed_matrix index
 * arrays; column indices within each row are sorted as CSR requires.
 * @param pScheme The integration scheme (provides EquationId per element/condition)
 * @param A The system matrix to be shaped
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const std::size_t equation_size = BaseType::mEquationSystemSize;
// indices[row] = set of column ids coupled with 'row'
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
{
// The process info
ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();
// We repeat the same declaration for each thead
// (full-size per-thread scratch sets; avoids locking during collection)
std::vector<std::unordered_set<std::size_t>> temp_indexes(equation_size);
#pragma omp for
for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30);
// Getting the size of the array of elements from the model
const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
// Element initial iterator
const auto el_begin = rModelPart.ElementsBegin();
// We iterate over the elements
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem < number_of_elements; ++i_elem)
{
auto it_elem = el_begin + i_elem;
pScheme->EquationId(*it_elem, ids, r_current_process_info);
// every pair of free dofs in the same element couples (row id_i, col id_j)
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Getting the size of the array of the conditions
const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
// Condition initial iterator
const auto cond_begin = rModelPart.ConditionsBegin();
// We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond < number_of_conditions; ++i_cond)
{
auto it_cond = cond_begin + i_cond;
pScheme->EquationId(*it_cond, ids, r_current_process_info);
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Merging all the temporal indexes
// (serialized: only one thread merges into the shared 'indices' at a time)
#pragma omp critical
{
for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i)
{
indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
// (prefix sum: each entry depends on the previous one)
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
// CSR requires ascending column indices within each row
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
/**
 * @brief Adds a local elemental matrix into the global LHS matrix.
 * @details Rows and columns whose equation id belongs to a fixed (restrained)
 * dof are skipped, so only the reduced free-dof system is assembled.
 * @param A The global LHS matrix
 * @param LHS_Contribution The local elemental matrix
 * @param EquationId Global equation ids of the local rows/columns
 */
void AssembleLHS(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    const unsigned int n = LHS_Contribution.size1();
    for (unsigned int row = 0; row < n; ++row)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // fixed dof: nothing assembled on this row
        for (unsigned int col = 0; col < n; ++col)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Appends 'candidate' to 'v' only if it is not already contained
// (linear scan; order of previously inserted values is preserved).
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
    for (const std::size_t &existing : v)
    {
        if (existing == candidate)
            return; // already stored, nothing to do
    }
    v.push_back(candidate);
}
// Splits the index range [0, number_of_rows) into 'number_of_threads' contiguous
// chunks, stored as boundaries in 'partitions' (size number_of_threads + 1).
// All chunks share the same base size; the last chunk absorbs the remainder.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int> &partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk = number_of_rows / number_of_threads;
    unsigned int offset = 0;
    for (unsigned int t = 0; t < number_of_threads; ++t)
    {
        partitions[t] = offset;
        offset += chunk;
    }
    partitions[number_of_threads] = number_of_rows;
}
/**
 * @brief Adds a local elemental RHS vector into the global RHS vector b.
 * @details Free dofs are always assembled into b. When reaction calculation is
 * enabled, contributions of fixed dofs are accumulated into the reactions vector
 * instead of being discarded. '#pragma omp atomic' makes each scalar update safe
 * under concurrent assembly.
 * @param b The global RHS vector
 * @param RHS_Contribution The local elemental vector
 * @param EquationId Global equation ids of the local entries
 */
void AssembleRHS(
TSystemVectorType &b,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
// fixed dofs are numbered from mEquationSystemSize upward, so the
// offset (i_global - mEquationSystemSize) indexes the reactions vector
double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
/**
 * @brief Adds a local elemental matrix into A, assembling COMPLETE rows.
 * @details Only rows belonging to free dofs are assembled, but on those rows ALL
 * columns are written (including columns of fixed dofs), unlike AssembleLHS which
 * drops fixed columns as well.
 * @param A The global LHS matrix
 * @param LHS_Contribution The local elemental matrix
 * @param EquationId Global equation ids of the local rows/columns
 */
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
// unsigned to match EquationId's value type (was a plain int,
// inconsistent with the sibling AssembleLHS and a potential
// narrowing of large equation ids)
unsigned int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
|
Convolve.h | #ifndef K_CV_CONVOLVE_H
#define K_CV_CONVOLVE_H
#include "ImageChannel.h"
#include "Kernel.h"
#include <cmath>
namespace K {
class Convolve {

public:

	/**
	 * convolve the given image with the provided kernel.
	 * returns a newly created image.
	 */
	static ImageChannel run(const ImageChannel& src, const Kernel& k, const bool normalize = true) {
		K::ImageChannel dst(src.getWidth(), src.getHeight());
		convolve(src, dst, k, normalize);
		return dst;
	}

	/**
	 * convolve src into dst using the kernel k.
	 * pixels falling outside the image are skipped; when 'normalize' is set the
	 * result is divided by the sum of |kernel weights| actually applied, which
	 * also keeps edge pixels correctly scaled.
	 */
	template <typename T> static void convolve(const T& src, T& dst, const Kernel& k, const bool normalize = true) {

		// kernel half-extents are loop invariants: hoisted out of the per-pixel
		// loops (they were recomputed for every pixel before)
		const int dx = k.getWidth() / 2;
		const int dy = k.getHeight() / 2;

		#pragma omp parallel for
		for (int y = 0; y < src.getHeight(); ++y) {
			for (int x = 0; x < src.getWidth(); ++x) {

				float val = 0;	// the value after convolving all pixels
				float sum = 0;	// used for normalization (unnormalized kernels / edges [less values used])

				// convolve the current pixel with the kernel
				for (int y1 = 0; y1 < k.getHeight(); ++y1) {
					for (int x1 = 0; x1 < k.getWidth(); ++x1) {
						const int ix = x + x1 - dx;
						const int iy = y + y1 - dy;
						// skip edges ?
						if (ix < 0 || ix >= src.getWidth()) {continue;}
						if (iy < 0 || iy >= src.getHeight()) {continue;}
						const float kv = k.get(x1, y1);		// kernel's value
						const float sv = src.get(ix, iy);	// source image's value
						val += kv*sv;
						sum += std::abs(kv);
					}
				}

				// set the normalized, convolved value
				const float res = (normalize && sum != 0) ? (val/sum) : (val);
				_assertNotNAN(res, "detected NaN");
				dst.set(x, y, res);

			}
		}

	}

};
}
#endif // K_CV_CONVOLVE_H
|
HSetMaintainer.h | #ifndef HSET_MAINTAINER_H
#define HSET_MAINTAINER_H
/*************************************************************
* Copyright: (C) 2012 by Markus Schordan *
* Author : Markus Schordan *
* License : see file LICENSE in the CodeThorn distribution *
*************************************************************/
#include <boost/unordered_set.hpp>
//#define HSET_MAINTAINER_DEBUG_MODE
/*!
* \author Markus Schordan
* \date 2012.
*/
// Hash-set based maintainer of heap-allocated objects: guarantees that at most one
// instance of each (value-)equal KeyType object is stored, and hands out stable
// pointers to the canonical instance. All mutating lookups are serialized through
// the named critical section HASHSET, so the container can be shared by threads.
// NOTE(review): relies on a using-directive for std being in effect at the point
// of inclusion (unqualified pair, make_pair, cerr, cout, endl) -- confirm.
template<typename KeyType,typename HashFun, typename EqualToPred>
class HSetMaintainer
: public boost::unordered_set<KeyType*,HashFun,EqualToPred>
{
public:
// first: true if the element was newly inserted; second: canonical stored pointer
typedef pair<bool,const KeyType*> ProcessingResult;
/*!
* \author Marc Jasper
* \date 2016.
*/
HSetMaintainer() { _keepStatesDuringDeconstruction = false; }
/*!
* \author Marc Jasper
* \date 2016.
*/
HSetMaintainer(bool keepStates) { _keepStatesDuringDeconstruction = keepStates; }
/*!
* \author Marc Jasper
* \date 2016.
*/
// Deletes all owned elements unless construction requested to keep them.
virtual ~HSetMaintainer() {
if (!_keepStatesDuringDeconstruction){
typename HSetMaintainer::iterator i;
for (i=this->begin(); i!=this->end(); ++i) {
delete (*i);
}
}
}
// True if a value-equal element is already stored.
bool exists(KeyType& s) {
return determine(s)!=0;
}
// Returns the position of 's' in iteration order (O(n) walk, since the
// unordered_set iterator has no operator-). Throws if 's' is unknown.
// NOTE(review): passes a KeyType (not KeyType*) to find(); only compiles for
// instantiations where this conversion is valid -- confirm intended usage.
size_t id(const KeyType& s) {
typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator i;
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(s);
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
// in lack of operator '-' we compute the distance
size_t pos=0;
typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator b;
b=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
while(b!=i) {
pos++;
++b;
}
return pos;
}
else
throw "Error: unknown value. Maintainer cannot determine an id.";
}
// NOTE(review): this member iterator appears unused -- the methods below all
// declare their own local 'i'. Candidate for removal; kept for ABI stability.
typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
// Returns the stored pointer value-equal to 's', or 0 if none exists.
KeyType* determine(KeyType& s) {
KeyType* ret=0;
typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
{
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(&s);
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
ret=const_cast<KeyType*>(*i);
} else {
ret=0;
}
}
return ret;
}
// Const overload of determine(); same semantics.
const KeyType* determine(const KeyType& s) {
const KeyType* ret=0;
typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
{
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s));
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
ret=const_cast<KeyType*>(*i);
} else {
ret=0;
}
}
return ret;
}
// Inserts 'key' (taking ownership on success) or finds the existing equal element.
// Returns <inserted?, canonical pointer>.
ProcessingResult process(const KeyType* key) {
ProcessingResult res2;
#pragma omp critical(HASHSET)
{
std::pair<typename HSetMaintainer::iterator, bool> res;
typename HSetMaintainer::iterator iter=this->find(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
if(iter!=this->end()) {
// found it!
res=make_pair(iter,false);
} else {
res=this->insert(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
}
res2=make_pair(res.second,*res.first);
}
return res2;
}
// Convenience wrapper: returns only the canonical pointer.
const KeyType* processNewOrExisting(const KeyType* s) {
ProcessingResult res=process(s);
return res.second;
}
//! <true,const KeyType> if new element was inserted
//! <false,const KeyType> if element already existed
// By-value overload: copies 'key' to the heap only when it is not yet stored.
ProcessingResult process(KeyType key) {
ProcessingResult res2;
#pragma omp critical(HASHSET)
{
std::pair<typename HSetMaintainer::iterator, bool> res;
typename HSetMaintainer::iterator iter=this->find(&key);
if(iter!=this->end()) {
// found it!
res=make_pair(iter,false);
} else {
// converting the stack allocated object to heap allocated
// this copies the entire object
// TODO: this can be avoided by providing a process function with a pointer arg
// this requires a more detailed result: pointer exists, alternate pointer with equal object exists, does not exist
KeyType* keyPtr=new KeyType();
*keyPtr=key;
res=this->insert(keyPtr);
if (!res.second) {
// this case should never occur, condition "iter!=this->end()" above would have been satisfied and
// this else branch would have therefore been ignored
cerr << "ERROR: HSetMaintainer: Element was not inserted even though it could not be found in the set." << endl;
ROSE_ASSERT(0);
delete keyPtr;
keyPtr = NULL;
}
}
#ifdef HSET_MAINTAINER_DEBUG_MODE
std::pair<typename HSetMaintainer::iterator, bool> res1;
res1=this->insert(key);
std::pair<typename HSetMaintainer::iterator, bool> res2;
res2=this->insert(key);
if(!(res1==res2)) {
cerr<< "Error: HsetMaintainer failed:"<<endl;
cerr<< "res1:"<<(*res1.first).toString()<<":"<<res1.second<<endl;
cerr<< "res2:"<<(*res2.first).toString()<<":"<<res2.second<<endl;
exit(1);
}
cerr << "HSET insert OK"<<endl;
#endif
res2=make_pair(res.second,*res.first);
}
return res2;
}
// Like process(), but aborts the program if the element already existed.
const KeyType* processNew(KeyType& s) {
//std::pair<typename HSetMaintainer::iterator, bool> res=process(s);
ProcessingResult res=process(s);
if(res.first!=true) {
cerr<< "Error: HsetMaintainer::processNew failed:"<<endl;
cerr<< "res:";
cout <<":"<<res.first<<endl;
cout <<res.second->toString();
exit(1);
}
return res.second;
}
// Inserts or finds 's'; returns the canonical stored pointer either way.
const KeyType* processNewOrExisting(KeyType& s) {
ProcessingResult res=process(s);
return res.second;
}
// Number of stored (unique) elements.
long numberOf() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::size(); }
// Size of the largest hash bucket (collision diagnostic).
long maxCollisions() {
size_t max=0;
for(size_t i=0; i<HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_count();++i) {
if(HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i)>max) {
max=HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i);
}
}
return max;
}
double loadFactor() {
return HSetMaintainer<KeyType,HashFun,EqualToPred>::load_factor();
}
// Approximate memory footprint: element sizes (via KeyType::memorySize())
// plus per-slot pointer overhead plus the container object itself.
long memorySize() const {
long mem=0;
for(typename HSetMaintainer<KeyType,HashFun,EqualToPred>::const_iterator i
=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end();
++i) {
mem+=(*i)->memorySize();
mem+=sizeof(*i);
}
return mem+sizeof(*this);
}
private:
//const KeyType* ptr(KeyType& s) {}
// When true, the destructor leaves the stored objects alive (ownership
// is assumed to have been transferred elsewhere).
bool _keepStatesDuringDeconstruction;
};
#endif
|
ctl_fragment.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : ctl_fragment.c
* Description : Fragment Control Source
*
* + This is part of libaroma, an embedded ui toolkit.
* + 27/06/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_ctl_fragment_c__
#define __libaroma_ctl_fragment_c__
#include <aroma_internal.h>
#include "../ui/ui_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
/*************************** CONTROL HANDLERS *********************************/
/* forward declarations of the control callbacks wired into the handler below */
dword _libaroma_ctl_fragment_msg(LIBAROMA_CONTROLP, LIBAROMA_MSGP);
void _libaroma_ctl_fragment_draw(LIBAROMA_CONTROLP, LIBAROMA_CANVASP);
void _libaroma_ctl_fragment_destroy(LIBAROMA_CONTROLP);
byte _libaroma_ctl_fragment_thread(LIBAROMA_CONTROLP);
/* control handler vtable for the fragment control (focus is not handled) */
static LIBAROMA_CONTROL_HANDLER _libaroma_ctl_fragment_handler={
message:_libaroma_ctl_fragment_msg,
draw:_libaroma_ctl_fragment_draw,
focus:NULL,
destroy:_libaroma_ctl_fragment_destroy,
thread:_libaroma_ctl_fragment_thread
};
/**************************** WINDOW HANDLERS *********************************/
/* forward declarations of the window callbacks wired into the handler below */
byte _libaroma_ctl_fragment_window_invalidate(LIBAROMA_WINDOWP win, byte sync);
byte _libaroma_ctl_fragment_window_sync(LIBAROMA_WINDOWP win,
int x,int y,int w,int h);
byte _libaroma_ctl_fragment_window_updatebg(LIBAROMA_WINDOWP win);
byte _libaroma_ctl_fragment_window_control_isvisible(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
LIBAROMA_CANVASP _libaroma_ctl_fragment_window_control_draw_begin(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
void _libaroma_ctl_fragment_window_postfree(LIBAROMA_WINDOWP win);
/* window handler vtable for windows hosted inside a fragment control;
   unimplemented hooks are left NULL */
static LIBAROMA_WINDOW_HANDLER _libaroma_ctl_fragment_win_handler={
prefree:NULL,
postfree:_libaroma_ctl_fragment_window_postfree,
updatebg:_libaroma_ctl_fragment_window_updatebg,
invalidate:_libaroma_ctl_fragment_window_invalidate,
sync:_libaroma_ctl_fragment_window_sync,
message_hooker:NULL,
control_draw_flush:NULL,
control_erasebg:NULL,
control_isvisible:_libaroma_ctl_fragment_window_control_isvisible,
control_draw_begin:_libaroma_ctl_fragment_window_control_draw_begin
};
/************************** FRAGMENT STRUCTURE ********************************/
/*
* Structure : __LIBAROMA_CTL_FRAGMENT
* Typedef : _LIBAROMA_CTL_FRAGMENT, * _LIBAROMA_CTL_FRAGMENTP
* Descriptions: button control internal structure
*/
typedef struct __LIBAROMA_CTL_FRAGMENT _LIBAROMA_CTL_FRAGMENT;
typedef struct __LIBAROMA_CTL_FRAGMENT * _LIBAROMA_CTL_FRAGMENTP;
struct __LIBAROMA_CTL_FRAGMENT{
LIBAROMA_WINDOWP * wins;
int win_n;
int win_pos;
int win_pos_out;
byte win_cleanup;
long transition_start;
long transition_duration;
float transition_state;
byte transition_type;
byte transision_delprev;
LIBAROMA_TRANSITION_CB transition_cb;
LIBAROMA_RECTP transition_rs;
LIBAROMA_RECTP transition_re;
byte redraw;
byte on_direct_canvas;
byte need_direct_canvas;
LIBAROMA_MUTEX mutex;
LIBAROMA_MUTEX dmutex;
int win_next_del_id;
};
typedef struct{
int id;
byte active_state;
LIBAROMA_CONTROLP ctl;
} _LIBAROMA_CTL_FRAGMENT_WIN, * _LIBAROMA_CTL_FRAGMENT_WINP;
/************************** INTERNAL FUNCTIONS ********************************/
/*
* Function : _libaroma_ctl_fragment_get_win_index
* Return Value: int
* Descriptions: get window index
*/
/*
 * Function    : _libaroma_ctl_fragment_get_win_index
 * Return Value: int
 * Descriptions: linear search for a window in the fragment's window
 *               array; returns its index, or -1 when not present.
 */
int _libaroma_ctl_fragment_get_win_index(
_LIBAROMA_CTL_FRAGMENTP me,
LIBAROMA_WINDOWP win){
int idx = 0;
while (idx < me->win_n){
if (me->wins[idx] == win){
return idx;
}
idx++;
}
return -1;
} /* End of _libaroma_ctl_fragment_get_win_index */
/* FRAGMENT VALIDATOR MACRO
 * Expands to the common prologue of every window-handler callback:
 * declares `wind` (per-window client data), `ctl` (owning control),
 * `me` (fragment internals, via _LIBAROMA_CTL_CHECK) and `win_index`,
 * returning `error_ret` if any of them cannot be resolved.
 * NOTE: requires a variable named `win` (LIBAROMA_WINDOWP) in scope. */
#define _VALIDATE_FRAGMENT(error_ret) \
_LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) \
win->client_data; \
if (!wind){ return error_ret; } \
LIBAROMA_CONTROLP ctl=wind->ctl; \
_LIBAROMA_CTL_CHECK( \
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, error_ret); \
int win_index = _libaroma_ctl_fragment_get_win_index(me,win); \
if (win_index==-1){ return error_ret; }
/*
* Function : _libaroma_ctl_fragment_direct_canvas
* Return Value: byte
* Descriptions: set as direct canvas
*/
/* Enable/disable direct-canvas mode for the active window.
 * When leaving direct mode, the window's working canvas (dc) is reset
 * from its background so stale control pixels don't linger.
 * Returns 1 on success, 0 when there is no active window. */
byte _libaroma_ctl_fragment_direct_canvas(LIBAROMA_CONTROLP ctl, byte state){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
libaroma_mutex_lock(me->dmutex);
if ((me->win_n<1)||(me->win_pos==-1)) {
libaroma_mutex_unlock(me->dmutex);
return 0;
}
LIBAROMA_WINDOWP win = me->wins[me->win_pos];
if (state){
me->on_direct_canvas=1;
}
else{
if (me->on_direct_canvas){
/*LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl);
if (ccv) {
libaroma_draw(win->dc,ccv,0,0,0);
libaroma_canvas_free(ccv);
}*/
/* repaint dc from the cached background, or flat window color */
if (win->bg) {
libaroma_draw(win->dc, win->bg, 0, 0, 0);
}
else libaroma_canvas_setcolor(win->dc, libaroma_colorget(NULL, NULL)->window_bg, 0xFF);
}
me->on_direct_canvas=0;
}
libaroma_mutex_unlock(me->dmutex);
return 1;
} /* End of _libaroma_ctl_fragment_direct_canvas */
/*
* Function : _libaroma_ctl_fragment_window_invalidate
* Return Value: byte
* Descriptions: window invalidate
*/
/* Repaint the whole window: background first, then every child control,
 * optionally followed by a sync (which only flags the fragment redraw).
 * NOTE(review): the OpenMP loop assumes libaroma_control_draw is safe to
 * call concurrently for distinct children — confirm against draw impl. */
byte _libaroma_ctl_fragment_window_invalidate(LIBAROMA_WINDOWP win, byte sync){
_VALIDATE_FRAGMENT(0);
if ((win->dc)&&(win->bg)){
libaroma_draw(win->dc,win->bg,0,0,0);
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
/* draw no sync */
libaroma_control_draw(win->childs[i], 0);
}
}
if (sync){
return _libaroma_ctl_fragment_window_sync(win,0,0,win->w,win->h);
}
return 1;
} /* End of _libaroma_ctl_fragment_window_invalidate */
/* Resize the hosted window to match the fragment control's geometry,
 * (re)allocating its working canvas, refreshing the background and
 * re-measuring every child control. */
void _libaroma_ctl_fragment_measure(LIBAROMA_WINDOWP win){
_VALIDATE_FRAGMENT(); /* empty error_ret: plain `return` in void fn */
libaroma_mutex_lock(me->dmutex);
win->x = 0;
win->y = 0;
win->ax=ctl->x; /* absolute position mirrors the control's */
win->ay=ctl->y;
win->w = ctl->w;
win->h = ctl->h;/*
if (win->bg){
if ((win->bg->w!=win->w)||(win->bg->h!=win->h)){
libaroma_canvas_free(win->bg);
win->bg=NULL;
}
}
if (!win->bg){
win->bg = libaroma_canvas(
win->w,
win->h
);
}*/
/* recreate dc only when the size actually changed */
if (win->dc){
if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){
libaroma_canvas_free(win->dc);
win->dc=NULL;
}
}
if (!win->dc){
win->dc = libaroma_canvas(
win->w,
win->h
);
}
_libaroma_ctl_fragment_window_updatebg(win);
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
libaroma_window_measure(win,win->childs[i]);
}
libaroma_mutex_unlock(me->dmutex);
}
/* send activate event */
void _libaroma_ctl_fragment_activate_win(LIBAROMA_WINDOWP win, byte active){
_VALIDATE_FRAGMENT();
LIBAROMA_MSG msg;
if (!active){
if (win->active){
wind->active_state=0;
libaroma_wm_compose(
&msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0
);
win->active=0;
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], &msg);
}
}
}
}
else{
if (!win->active){
wind->active_state=1;
if (!win->dc){
_libaroma_ctl_fragment_measure(win);
}
libaroma_wm_compose(
&msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 0, 0
);
int i;
win->active=1;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], &msg);
}
}
}
}
}
/*
* Function : _libaroma_ctl_fragment_window_postfree
* Return Value: void
* Descriptions: post free window
*/
/*
 * Function    : _libaroma_ctl_fragment_window_postfree
 * Return Value: void
 * Descriptions: release the per-window client data after the window
 *               itself has been freed (postfree vtable hook).
 *               The `if (wind)` is redundant — _VALIDATE_FRAGMENT
 *               already returned when wind is NULL — but harmless.
 */
void _libaroma_ctl_fragment_window_postfree(LIBAROMA_WINDOWP win){
_VALIDATE_FRAGMENT();
if (wind){
free(wind);
win->client_data=NULL;
}
} /* End of _libaroma_ctl_fragment_window_postfree */
/*
* Function : _libaroma_ctl_fragment_window_sync
* Return Value: byte
* Descriptions: window sync
*/
/*
 * Function    : _libaroma_ctl_fragment_window_sync
 * Return Value: byte
 * Descriptions: sync hook — the dirty rectangle (x,y,w,h) is ignored;
 *               the whole fragment is simply flagged for redraw.
 *               Returns 0 for windows that are not active.
 */
byte _libaroma_ctl_fragment_window_sync(LIBAROMA_WINDOWP win,
int x,int y,int w,int h){
_VALIDATE_FRAGMENT(0);
if (!wind->active_state){
return 0;
}
me->redraw=1;
return 1;
} /* End of _libaroma_ctl_fragment_window_sync */
/*
* Function : _libaroma_ctl_fragment_window_control_isvisible
* Return Value: byte
* Descriptions: check if control is visible
*/
/*
 * Function    : _libaroma_ctl_fragment_window_control_isvisible
 * Return Value: byte
 * Descriptions: a child control is visible exactly when its hosting
 *               window is in an active state.
 */
byte _libaroma_ctl_fragment_window_control_isvisible(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
_VALIDATE_FRAGMENT(0);
return (wind->active_state) ? 1 : 0;
} /* End of _libaroma_ctl_fragment_window_control_isvisible */
/*
* Function : _libaroma_ctl_fragment_window_control_draw_begin
* Return Value: LIBAROMA_CANVASP
* Descriptions: get canvas for child control
*/
/* Hand a child control a canvas region to draw into.
 * Direct-canvas mode: a sub-area of the fragment control's own canvas
 * (child pixels go straight to screen on flush).
 * Otherwise: a sub-area of the window's offscreen dc.
 * Returns NULL when the window is inactive or no canvas is available;
 * caller owns the returned canvas area. */
LIBAROMA_CANVASP _libaroma_ctl_fragment_window_control_draw_begin(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
_VALIDATE_FRAGMENT(NULL);
if (!wind->active_state){
return NULL;
}
LIBAROMA_CANVASP c=NULL;
libaroma_mutex_lock(me->dmutex);
if (me->on_direct_canvas){
int x = cctl->x;
int y = cctl->y;
int w = cctl->w;
int h = cctl->h;
LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl);
if (ccv){
/* only clip a sub-area when the child's origin is inside ccv */
if ((ccv->w>x)&&(ccv->h>y)){
c = libaroma_canvas_area(ccv,x,y,w,h);
}
libaroma_canvas_free(ccv);
}
}
else {
if (win->dc!=NULL){
c = libaroma_canvas_area(
win->dc,
cctl->x, cctl->y, cctl->w, cctl->h
);
}
}
libaroma_mutex_unlock(me->dmutex);
return c;
} /* End of _libaroma_ctl_fragment_window_control_draw_begin */
/*
* Function : _libaroma_ctl_fragment_window_updatebg
* Return Value: byte
* Descriptions: window update background
*/
/*
 * Function    : _libaroma_ctl_fragment_window_updatebg
 * Return Value: byte
 * Descriptions: (re)build the window background canvas, filled with the
 *               theme's window_bg color; reuses the existing canvas when
 *               its size already matches. Always returns 1.
 */
byte _libaroma_ctl_fragment_window_updatebg(LIBAROMA_WINDOWP win){
_VALIDATE_FRAGMENT(0);
libaroma_mutex_lock(me->dmutex);
int w = win->w;
int h = win->h;
if (win->bg!=NULL){
if ((win->bg->w==w)&&(win->bg->h==h)){
/* size unchanged: keep the cached background */
libaroma_mutex_unlock(me->dmutex);
return 1;
}
libaroma_canvas_free(win->bg);
}
win->bg = libaroma_canvas(w,h);
libaroma_canvas_setcolor(
win->bg,
libaroma_colorget(ctl,NULL)->window_bg,
0xff
);
libaroma_mutex_unlock(me->dmutex);
return 1;
} /* End of _libaroma_ctl_fragment_window_updatebg */
/*
* Function : _libaroma_ctl_fragment_draw
* Return Value: void
* Descriptions: draw callback
*/
void _libaroma_ctl_fragment_draw(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CANVASP c){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP,
);
libaroma_mutex_lock(me->mutex);
if ((me->win_n<1)||(me->win_pos==-1)) {
libaroma_control_erasebg(ctl,c);
me->redraw=0;
libaroma_mutex_unlock(me->mutex);
return;
}
if (!me->redraw){
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<me->win_n;i++){
_LIBAROMA_CTL_FRAGMENT_WINP wind =
(_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data;
if (wind->active_state){
if (!me->wins[i]->active){
_libaroma_ctl_fragment_window_invalidate(me->wins[i],0);
}
}
}
}
/* draw window canvas */
libaroma_mutex_lock(me->dmutex);
if (!me->on_direct_canvas){
if (me->win_pos_out==-1){
LIBAROMA_WINDOWP awin = me->wins[me->win_pos];
if (awin->dc){
libaroma_draw(c,awin->dc,0,0,0);
}
else{
libaroma_control_erasebg(ctl,c);
}
}
else{
LIBAROMA_WINDOWP awin = me->wins[me->win_pos];
LIBAROMA_WINDOWP owin = me->wins[me->win_pos_out];
if (me->transition_state==1){
if (awin->dc){
libaroma_draw(c,awin->dc,0,0,0);
}
else{
libaroma_control_erasebg(ctl,c);
}
me->transition_state=0;
}
else if ((me->transition_cb)&&(owin->dc)&&(awin->dc)){
me->transition_cb(
c,
owin->dc,
awin->dc,
me->transition_state,
me->transition_rs,
me->transition_re
);
}
else{
/* simple alpha transition */
if (owin->dc){
libaroma_draw(c,owin->dc,0,0,0);
}
else{
libaroma_control_erasebg(ctl,c);
}
if (awin->dc){
libaroma_draw_opacity(c,awin->dc,0,0,0,0xff*me->transition_state);
}
}
}
}
libaroma_mutex_unlock(me->dmutex);
/* need revert to direct canvas */
if (me->need_direct_canvas){
me->need_direct_canvas=0;
_libaroma_ctl_fragment_direct_canvas(ctl, 1);
}
me->redraw=0;
libaroma_mutex_unlock(me->mutex);
} /* End of _libaroma_ctl_fragment_draw */
byte libaroma_ctl_fragment_del_window_nomutex(
LIBAROMA_CONTROLP ctl, int id);
/*
* Function : _libaroma_ctl_fragment_thread
* Return Value: byte
* Descriptions: control thread callback
*/
/*
 * Function    : _libaroma_ctl_fragment_thread
 * Return Value: byte
 * Descriptions: control thread callback — runs child-control threads for
 *               every active window, advances the transition animation,
 *               and performs any deferred window deletion. Returns
 *               nonzero when the control needs repainting.
 */
byte _libaroma_ctl_fragment_thread(LIBAROMA_CONTROLP ctl) {
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
/* NOTE(review): win_n/win_pos read before taking me->mutex — racy
 * against concurrent add/delete; confirm single-threaded assumption. */
if ((me->win_n<1)||(me->win_pos==-1)) {
return 0;
}
libaroma_mutex_lock(me->mutex);
/* deferred deletion requested by a previous transition completion */
if (me->win_next_del_id!=-1){
libaroma_ctl_fragment_del_window_nomutex(ctl,me->win_next_del_id);
me->win_next_del_id=-1;
}
byte is_draw = me->redraw;
{
int j;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (j=0;j<me->win_n;j++){
LIBAROMA_WINDOWP win = me->wins[j];
_LIBAROMA_CTL_FRAGMENT_WINP wind =
(_LIBAROMA_CTL_FRAGMENT_WINP) win->client_data;
if (wind->active_state){
if (win->active){
int i;
/* NOTE(review): nested `omp parallel for` inside an outer
 * parallel region is serialized unless nesting is enabled */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
LIBAROMA_CONTROLP c=win->childs[i];
if (c->handler->thread!=NULL){
if (c->handler->thread(c)){
if (libaroma_control_draw(c,0)){
is_draw=1;
}
}
}
}
}
}
}
}
{
/* advance transition animation, if one is running */
if ((me->transition_start!=0)&&(me->win_pos_out!=-1)){
float nowstate=libaroma_duration_state(
me->transition_start, me->transition_duration
);
if (nowstate!=me->transition_state){
if (nowstate>=1){
/* transition finished: deactivate the outgoing window and
 * optionally queue it for deletion on the next tick */
me->transition_start=0;
me->transition_state=1;
me->need_direct_canvas=1;
if (me->transision_delprev){
_LIBAROMA_CTL_FRAGMENT_WINP windd=
(_LIBAROMA_CTL_FRAGMENT_WINP)
me->wins[me->win_pos_out]->client_data;
me->win_next_del_id=windd->id;
}
_libaroma_ctl_fragment_activate_win(
me->wins[me->win_pos_out], 0
);
me->win_pos_out=-1;
me->transision_delprev=0;
}
else{
me->transition_state=nowstate;
}
is_draw=1;
}
}
}
libaroma_mutex_unlock(me->mutex);
return is_draw;
} /* End of _libaroma_ctl_fragment_thread */
/*
* Function : _libaroma_ctl_fragment_destroy
* Return Value: void
* Descriptions: destroy callback
*/
/*
 * Function    : _libaroma_ctl_fragment_destroy
 * Return Value: void
 * Descriptions: destroy callback — frees every hosted window, the window
 *               array, both mutexes and the internal structure itself.
 */
void _libaroma_ctl_fragment_destroy(
LIBAROMA_CONTROLP ctl){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP,
);
libaroma_mutex_lock(me->mutex);
if (me->win_n>0){
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<me->win_n;i++){
libaroma_window_free(me->wins[i]);
}
free(me->wins);
me->wins=NULL;
me->win_n=0;
}
libaroma_mutex_unlock(me->mutex);
libaroma_mutex_free(me->mutex);
libaroma_mutex_free(me->dmutex);
free(me);
} /* End of _libaroma_ctl_fragment_destroy */
/*
* Function : _libaroma_ctl_fragment_msg
* Return Value: byte
* Descriptions: message callback
*/
/*
 * Function    : _libaroma_ctl_fragment_msg
 * Return Value: dword
 * Descriptions: message callback — forwards window lifecycle messages to
 *               the children of every active hosted window, re-measures
 *               on WIN_MEASURED, and routes touch input to the child
 *               under the pointer of the currently active window.
 */
dword _libaroma_ctl_fragment_msg(
LIBAROMA_CONTROLP ctl,
LIBAROMA_MSGP msg){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
dword ret = 0;
switch(msg->msg){
case LIBAROMA_MSG_WIN_ACTIVE:
case LIBAROMA_MSG_WIN_INACTIVE:
case LIBAROMA_MSG_WIN_RESIZE:
{
/* broadcast to children of every active hosted window */
libaroma_mutex_lock(me->mutex);
int z;
for (z=0;z<me->win_n;z++){
LIBAROMA_WINDOWP win = me->wins[z];
_LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP)
win->client_data;
if (!windn->active_state){
continue;
}
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], msg);
}
}
}
libaroma_mutex_unlock(me->mutex);
}
break;
case LIBAROMA_MSG_WIN_MEASURED:
{
/* parent was measured: re-measure every active hosted window */
int z;
libaroma_mutex_lock(me->mutex);
for (z=0;z<me->win_n;z++){
LIBAROMA_WINDOWP win = me->wins[z];
_LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP)
win->client_data;
if (windn->active_state){
_libaroma_ctl_fragment_measure(win);
}
}
libaroma_mutex_unlock(me->mutex);
}
break;
case LIBAROMA_MSG_TOUCH:
{
libaroma_mutex_lock(me->mutex);
if ((me->win_n<1)||(me->win_pos==-1)) {
libaroma_mutex_unlock(me->mutex);
return 0;
}
LIBAROMA_WINDOWP win = me->wins[me->win_pos];
if (me->win_pos_out!=-1){
/* transition running: swallow input until next touch-down */
me->win_cleanup=1;
libaroma_mutex_unlock(me->mutex);
return 0;
}
if ((msg->state!=LIBAROMA_HID_EV_STATE_DOWN)&&(me->win_cleanup)){
libaroma_mutex_unlock(me->mutex);
return 0;
}
me->win_cleanup=0;
/* translate to window-local coordinates (mutates msg in place) */
int x = msg->x;
int y = msg->y;
libaroma_window_calculate_pos(NULL,ctl,&x,&y);
msg->x = x;
msg->y = y;
/* touch handler */
if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
/* hit-test children; first match captures the gesture */
win->touched = NULL;
int i;
for (i=0;i<win->childn;i++){
if (_libaroma_window_is_inside(win->childs[i],x,y)){
win->touched = win->childs[i];
break;
}
}
if (win->touched!=NULL){
if (win->touched->handler->message){
ret=win->touched->handler->message(win->touched, msg);
}
}
}
else if (win->touched!=NULL){
/* MOVE/UP go to the capturing child; release capture on UP */
if (win->touched->handler->message){
ret=win->touched->handler->message(win->touched, msg);
}
if (msg->state==LIBAROMA_HID_EV_STATE_UP){
win->touched=NULL;
}
}
libaroma_mutex_unlock(me->mutex);
}
break;
}
return ret;
} /* End of _libaroma_ctl_fragment_msg */
/*
* Function : libaroma_ctl_fragment
* Return Value: LIBAROMA_CONTROLP
* Descriptions: create button control
*/
/*
 * Function    : libaroma_ctl_fragment
 * Return Value: LIBAROMA_CONTROLP
 * Descriptions: create a fragment control and attach it to `win`.
 *               Returns the attached control, or NULL when `win` is NULL
 *               or allocation/control creation fails.
 * Params      : win  - parent window (required)
 *               id   - control id
 *               x,y,w,h - control geometry
 */
LIBAROMA_CONTROLP libaroma_ctl_fragment(
LIBAROMA_WINDOWP win,
word id, int x, int y, int w, int h
){
if (!win){
ALOGW("pager need direct window attach");
return NULL;
}
/* init internal data (zeroed) */
_LIBAROMA_CTL_FRAGMENTP me = (_LIBAROMA_CTL_FRAGMENTP)
calloc(1,sizeof(_LIBAROMA_CTL_FRAGMENT)); /* idiom: calloc(nmemb,size) */
if (!me){
ALOGW("libaroma_ctl_fragment alloc pager memory failed");
return NULL;
}
/* sentinel indexes: no windows, no active/outgoing window yet */
me->win_pos_out=-1;
me->win_pos=-1;
me->wins = NULL;
me->on_direct_canvas = 1;
me->win_next_del_id=-1;
/* init control */
LIBAROMA_CONTROLP ctl =
libaroma_control_new(
id,
x, y, w, h,
libaroma_dp(48),libaroma_dp(48), /* min size */
(voidp) me,
&_libaroma_ctl_fragment_handler,
NULL
);
if (!ctl){
free(me);
return NULL;
}
libaroma_mutex_init(me->mutex);
libaroma_mutex_init(me->dmutex);
return libaroma_window_attach(win,ctl);
} /* End of libaroma_ctl_fragment */
/*
* Function : libaroma_ctl_fragment_new_window
* Return Value: LIBAROMA_WINDOWP
* Descriptions: new window
*/
/*
 * Function    : libaroma_ctl_fragment_new_window
 * Return Value: LIBAROMA_WINDOWP
 * Descriptions: create a new (initially inactive) window hosted by this
 *               fragment control. Returns NULL on duplicate id or
 *               allocation failure.
 */
LIBAROMA_WINDOWP libaroma_ctl_fragment_new_window(
LIBAROMA_CONTROLP ctl, int id){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, NULL
);
if (!ctl->window){
ALOGW("libaroma_ctl_fragment_new_window fragment should append to "
"window first");
return NULL;
}
libaroma_mutex_lock(me->mutex);
int new_pos = me->win_n;
if (me->win_n==0){
me->wins=(LIBAROMA_WINDOWP *) calloc(1,sizeof(LIBAROMA_WINDOWP));
if (!me->wins){
libaroma_mutex_unlock(me->mutex);
ALOGW("libaroma_ctl_fragment_new_window calloc window holder failed");
return NULL;
}
me->win_n=1;
}
else{
int i;
for (i=0;i<me->win_n;i++){
_LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP)
me->wins[i]->client_data;
if (id==windn->id){
/* BUGFIX: release the mutex before bailing out — the old code
 * returned while still holding me->mutex, deadlocking the next
 * caller that tried to lock it. */
libaroma_mutex_unlock(me->mutex);
ALOGW("libaroma_ctl_fragment_new_window id already exist");
return NULL;
}
}
LIBAROMA_WINDOWP * newins =(LIBAROMA_WINDOWP *) realloc(
me->wins, sizeof(LIBAROMA_WINDOWP)*(me->win_n+1));
if (newins){
me->wins=newins;
me->win_n++;
}
else{
libaroma_mutex_unlock(me->mutex);
ALOGW("libaroma_ctl_fragment_new_window realloc window holder failed");
return NULL;
}
}
me->wins[new_pos] = (LIBAROMA_WINDOWP) calloc(1,sizeof(LIBAROMA_WINDOW));
if (!me->wins[new_pos]){
ALOGW("libaroma_ctl_fragment_new_window alloc window data failed");
if (me->win_n==1){
free(me->wins);
me->win_n=0;
me->wins=NULL;
}
else{
me->wins =(LIBAROMA_WINDOWP *) realloc(me->wins,
sizeof(LIBAROMA_WINDOWP)*(me->win_n-1));
me->win_n--;
}
libaroma_mutex_unlock(me->mutex);
return NULL;
}
LIBAROMA_WINDOWP nwin = me->wins[new_pos];
nwin->handler=&_libaroma_ctl_fragment_win_handler;
nwin->parent=ctl->window;
_LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) calloc(
1, sizeof(_LIBAROMA_CTL_FRAGMENT_WIN));
if (!wind){
/* BUGFIX: the old code dereferenced a NULL calloc result; roll the
 * window slot back and fail cleanly instead. */
ALOGW("libaroma_ctl_fragment_new_window alloc window data failed");
free(me->wins[new_pos]);
if (me->win_n==1){
free(me->wins);
me->win_n=0;
me->wins=NULL;
}
else{
me->win_n--;
}
libaroma_mutex_unlock(me->mutex);
return NULL;
}
wind->id = id;
wind->active_state = 0;
wind->ctl = ctl;
nwin->client_data = (voidp) wind;
libaroma_mutex_unlock(me->mutex);
return me->wins[new_pos];
} /* End of libaroma_ctl_fragment_new_window */
/*
* Function : libaroma_ctl_fragment_get_window
* Return Value: LIBAROMA_WINDOWP
* Descriptions: get window
*/
/*
 * Function    : libaroma_ctl_fragment_get_window
 * Return Value: LIBAROMA_WINDOWP
 * Descriptions: look up a hosted window by its id; NULL when not found.
 */
LIBAROMA_WINDOWP libaroma_ctl_fragment_get_window(
LIBAROMA_CONTROLP ctl, int id){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, NULL
);
LIBAROMA_WINDOWP found = NULL;
int i;
libaroma_mutex_lock(me->mutex);
for (i=0; (i<me->win_n) && (found==NULL); i++){
_LIBAROMA_CTL_FRAGMENT_WINP cdata = (_LIBAROMA_CTL_FRAGMENT_WINP)
me->wins[i]->client_data;
if (cdata->id==id){
found = me->wins[i];
}
}
libaroma_mutex_unlock(me->mutex);
return found;
}
/*
* Function : libaroma_ctl_fragment_del_window
* Return Value: byte
* Descriptions: delete window
*/
/*
 * Function    : libaroma_ctl_fragment_del_window_nomutex
 * Return Value: byte — 1 on successful deletion, 0 otherwise
 * Descriptions: delete a hosted window by id without taking me->mutex
 *               (caller must hold it, or be the thread callback).
 *               The active window cannot be deleted.
 */
byte libaroma_ctl_fragment_del_window_nomutex(
LIBAROMA_CONTROLP ctl, int id){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
/* wait for transition
 * NOTE(review): busy-wait without a lock; verify callers cannot start a
 * new transition concurrently */
while(me->win_pos_out!=-1){
libaroma_sleep(16);
}
int i;
int did = -1;
LIBAROMA_WINDOWP win=NULL;
for (i=0;i<me->win_n;i++){
_LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP)
me->wins[i]->client_data;
if (id==windn->id){
win=me->wins[i];
did=i;
break;
}
}
byte ret=0;
if (!win){
/* BUGFIX: check "not found" first — previously an unknown id with
 * win_pos==-1 matched did==win_pos and logged the wrong warning */
ALOGW("libaroma_ctl_fragment_del_window window id not found");
}
else if (me->win_pos==did){
ALOGW("libaroma_ctl_fragment_del_window cannot delete active window");
}
else{
int newn = me->win_n-1;
if (newn<1){
if (me->wins){
free(me->wins);
me->wins=NULL;
}
me->win_n=0;
}
else{
LIBAROMA_WINDOWP * newins = calloc(newn,sizeof(LIBAROMA_WINDOWP));
if (!newins){
/* BUGFIX: the old code dereferenced a NULL calloc result */
ALOGW("libaroma_ctl_fragment_del_window alloc holder failed");
return 0;
}
int n=0;
for (i=0;i<me->win_n;i++){
if (i!=did){
newins[n++]=me->wins[i];
}
}
free(me->wins);
me->wins=newins;
me->win_n=newn;
}
libaroma_window_free(win);
/* BUGFIX: report success — ret was previously left at 0 on every
 * path, so callers always saw failure */
ret=1;
}
return ret;
}
/* Public, mutex-protected wrapper around the _nomutex deletion above. */
byte libaroma_ctl_fragment_del_window(
LIBAROMA_CONTROLP ctl, int id){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
libaroma_mutex_lock(me->mutex);
byte ret=libaroma_ctl_fragment_del_window_nomutex(ctl,id);
libaroma_mutex_unlock(me->mutex);
return ret;
}
/*
* Function : libaroma_ctl_fragment_set_active_window
* Return Value: byte
* Descriptions: set active page
*/
/*
 * Function    : libaroma_ctl_fragment_set_active_window
 * Return Value: byte — 1 when the switch was started, 0 on error
 * Descriptions: make window `id` the active one, optionally animating
 *               the switch (anitype/duration/transcb) and optionally
 *               deleting the previous window when the animation ends.
 */
byte libaroma_ctl_fragment_set_active_window(
LIBAROMA_CONTROLP ctl, int id,
byte anitype, long duration, byte remove_prev,
LIBAROMA_TRANSITION_CB transcb,
LIBAROMA_RECTP rect_start,
LIBAROMA_RECTP rect_end
){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0
);
/* wait for transition
 * NOTE(review): unlocked busy-wait; another thread could start a new
 * transition between this loop and the lock below — confirm callers */
while(me->win_pos_out!=-1){
libaroma_sleep(16);
}
byte ret=0;
int i;
int did = -1;
libaroma_mutex_lock(me->mutex);
LIBAROMA_WINDOWP win=NULL;
for (i=0;i<me->win_n;i++){
_LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP)
me->wins[i]->client_data;
if (id==windn->id){
win=me->wins[i];
did=i;
break;
}
}
if (did!=-1){
if (me->win_pos!=did){
/* activate incoming window (measures it on first activation) */
_libaroma_ctl_fragment_activate_win(win,1);
/* presumably lets the activation settle before animating —
 * TODO confirm why 120ms */
libaroma_sleep(120);
if (me->win_pos!=-1){
/* there is a previous window: start the transition */
me->transition_start=libaroma_tick();
me->transition_duration=duration;
me->transition_type=anitype;
me->transition_state=0;
me->transision_delprev=remove_prev;
me->transition_cb=transcb;
me->transition_rs=rect_start;
me->transition_re=rect_end;
_LIBAROMA_CTL_FRAGMENT_WINP windid =
(_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[did]->client_data;
windid->active_state=2; /* incoming */
me->win_pos_out=me->win_pos;
me->win_pos=did;
_libaroma_ctl_fragment_direct_canvas(ctl,0);
}
else{
/* first window ever shown: no animation needed */
me->win_pos_out=me->win_pos;
me->win_pos=did;
}
ret=1;
me->redraw=1;
}
else{
ALOGW("libaroma_ctl_fragment_set_active_window "
"cannot reactivate active window");
}
}
else{
ALOGW("libaroma_ctl_fragment_set_active_window window id not found");
}
libaroma_mutex_unlock(me->mutex);
return ret;
}
#ifdef __cplusplus
}
#endif
#endif /* __libaroma_ctl_fragment_c__ */
|
GB_binop__lxor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale): GB (_AxD__lxor_uint32)
// D*A function (rowscale): GB (_DxB__lxor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B GB (_bind1st__lxor_uint32)
// C=scalar+B' GB (_bind1st_tran__lxor_uint32)
// C=A+scalar GB (_bind2nd__lxor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B, all dense: generated wrapper around the shared template;
 * returns GrB_NO_VALUE when the operator is disabled at compile time. */
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B (C dense, B sparse): generated wrapper around subassign_23. */
GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b (scalar accumulate into dense C): generated wrapper. */
GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* NOTE(review): unreachable — the inner block already returned
 * (generated-code artifact, harmless) */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D (column scale by diagonal D): generated wrapper. */
GrB_Info GB (_AxD__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B (row scale by diagonal D): generated wrapper. */
GrB_Info GB (_DxB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd C=A+B (optionally masked): generated wrapper; workspace is
 * declared here and released by GB_FREE_WORK after the template runs. */
GrB_Info GB (_AaddB__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult C=A.*B where C is sparse/hypersparse: generated wrapper. */
GrB_Info GB (_AemultB_08__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult C<#>=A.*B, A sparse/hyper and B bitmap/full: generated
 * wrapper. GB_BINOP_FLIP is 0 for lxor (commutative), so the flipped
 * branch below is compiled out. */
GrB_Info GB (_AemultB_02__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult C<M>=A.*B, M sparse/hyper, A and B bitmap/full: wrapper. */
GrB_Info GB (_AemultB_04__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult where C is bitmap: generated wrapper. */
GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx = op (x, Bx): apply the lxor binary operator with the scalar bound
 * as the first argument. Entries absent from the bitmap Bb are skipped.
 * Cx and Bx may alias (in-place apply). */
GrB_Info GB (_bind1st__lxor_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
if (GBB (Bb, k))
{
uint32_t bval = GBX (Bx, k, false) ;
Cx [k] = ((x != 0) != (bval != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx = op (Ax, y): apply the lxor binary operator with the scalar bound
 * as the second argument. Entries absent from the bitmap Ab are skipped.
 * Cx and Ax may alias (in-place apply). */
GrB_Info GB (_bind2nd__lxor_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
uint32_t aval = GBX (Ax, k, false) ;
Cx [k] = ((aval != 0) != (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the included GB_unop_transpose.c kernel: for
// each entry A(i,j) moved to C it applies z = lxor(x,aij), where x is the
// scalar bound to the operator's first operand.
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = lxor(x, A'): transpose A while applying the operator with x bound
// to the first operand.  The actual transpose loop is generated by the
// #include of GB_unop_transpose.c below.
GrB_Info GB (_bind1st_tran__lxor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar x (first operand) for use inside GB_CAST_OP
uint32_t x = (*((const uint32_t *) x_input)) ;
// generated transpose kernel; uses GB_ATYPE and GB_CAST_OP defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the included GB_unop_transpose.c kernel: for
// each entry A(i,j) moved to C it applies z = lxor(aij,y), where y is the
// scalar bound to the operator's second operand.
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = lxor(A', y): transpose A while applying the operator with y bound
// to the second operand.  The actual transpose loop is generated by the
// #include of GB_unop_transpose.c below.
GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar y (second operand) for use inside GB_CAST_OP
uint32_t y = (*((const uint32_t *) y_input)) ;
// generated transpose kernel; uses GB_CAST_OP defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
trace.c | /*
* trace.c - This file contains the functions for firing primary rays
* and handling subsequent calculations
*
* $Id: trace.c,v 1.107 2004/05/28 15:23:34 johns Exp $
*/
#include "machine.h"
#include "types.h"
#include "macros.h"
#include "vector.h"
#include "shade.h"
#include "camera.h"
#include "util.h"
#include "threads.h"
#include "parallel.h"
#include "intersect.h"
#include "ui.h"
#include "trace.h"
/* trace() - fire one ray into the scene and return its color.
 * A ray whose depth budget is exhausted contributes the background color. */
color trace(ray * primary) {
  if (primary->depth <= 0)
    return primary->scene->background; /* truncated ray: background color */

  intersect_objects(primary);            /* find nearest hit along the ray */
  return primary->scene->shader(primary); /* shade the intersection        */
}
/*
 * thread_trace() - per-worker rendering loop.  Each thread renders the
 * scanlines assigned to it (via startx/stopx/starty/stopy/xinc/yinc in
 * thr_parms), quantizes the traced colors to 8-bit RGB, and stores them
 * into the shared image buffer.  Returns NULL (pthread-style signature).
 *
 * NOTE(review): when compiled with _OPENMP, the locals declared below
 * (addr, col, primary, R, G, B, x, ...) are declared BEFORE the
 * "#pragma omp parallel" region and are therefore shared across all
 * OpenMP threads -- this looks like a data race on addr/primary/col;
 * confirm against the upstream Tachyon sources before relying on the
 * OpenMP build.
 */
void * thread_trace(thr_parms * t) {
unsigned long * local_mbox = NULL;
scenedef * scene;
int addr, R,G,B;
unsigned char * img;
color col;
ray primary;
int x, y, do_ui, hskip;
int startx, stopx, xinc, starty, stopy, yinc, hsize, vres;
#if defined(_OPENMP)
#pragma omp parallel
{
#endif
/*
* Copy all of the frequently used parameters into local variables.
* This seems to improve performance, especially on NUMA systems.
*/
startx = t->startx;
stopx = t->stopx;
xinc = t->xinc;
starty = t->starty;
stopy = t->stopy;
yinc = t->yinc;
scene = t->scene;
img = scene->img;
hsize = scene->hres*3;   /* bytes per scanline (3 bytes per pixel) */
vres = scene->vres;
hskip = xinc * 3;        /* byte stride between pixels this thread owns */
/* only node 0 / thread 0 drives the progress-meter UI */
do_ui = (scene->mynode == 0 && t->tid == 0);
#if !defined(DISABLEMBOX)
/* allocate mailbox array per thread... */
#if defined(_OPENMP)
local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->numobjects, 1);
#else
if (t->local_mbox == NULL)
local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->objgroup.numobjects, 1);
else
local_mbox = t->local_mbox;
#endif
#else
local_mbox = NULL; /* mailboxes are disabled */
#endif
#if defined(_OPENMP)
#pragma omp single
#endif
/*
* If we are getting close to integer wraparound on the
* ray serial numbers, we need to re-clear the mailbox
* array(s). Each thread maintains its own serial numbers
* so only those threads that are getting hit hard will
* need to re-clear their mailbox arrays. In all likelihood,
* the threads will tend to hit their counter limits at about
* the same time though.
* When compiled on platforms with a 64-bit long, this counter won't
* wraparound in _anyone's_ lifetime, so no need to even check....
* On lesser-bit platforms, we're not quite so lucky, so we have to check.
*/
#if !defined(LP64)
if (local_mbox != NULL) {
/* threshold: 1/8th below the top of the unsigned long range */
if (t->serialno > (((unsigned long) 1) << ((sizeof(unsigned long) * 8) - 3))) {
memset(local_mbox, 0, sizeof(unsigned long) * scene->objgroup.numobjects);
t->serialno = 1;
}
}
#endif
/* setup the thread-specific properties of the primary ray(s) */
camray_init(scene, &primary, t->serialno, local_mbox);
#if defined(_OPENMP)
#pragma omp for schedule(runtime)
#endif
for (y=starty; y<=stopy; y+=yinc) {
addr = hsize * (y - 1) + (3 * (startx - 1)); /* scanline address */
for (x=startx; x<=stopx; x+=xinc) {
col=scene->camera.cam_ray(&primary, x, y); /* generate ray */
/* NOTE(review): only the upper bound is clamped below; negative
* components (if the shader can produce them) would wrap when cast
* to byte -- presumably shaders never return negatives; confirm. */
R = (int) (col.r * 255.0f); /* quantize float to integer */
G = (int) (col.g * 255.0f); /* quantize float to integer */
B = (int) (col.b * 255.0f); /* quantize float to integer */
if (R > 255) R = 255; /* clamp pixel value to range 0-255 */
img[addr ] = (byte) R; /* Store final pixel to the image buffer */
if (G > 255) G = 255; /* clamp pixel value to range 0-255 */
img[addr + 1] = (byte) G; /* Store final pixel to the image buffer */
if (B > 255) B = 255; /* clamp pixel value to range 0-255 */
img[addr + 2] = (byte) B; /* Store final pixel to the image buffer */
addr += hskip;
} /* end of x-loop */
if (do_ui && !((y-1) % 64)) {
rt_ui_progress((100 * y) / vres); /* call progress meter callback */
}
#ifdef MPI
/* distributed run: synchronize and exchange finished scanlines */
if (scene->nodes > 1) {
rt_thread_barrier(t->runbar, 1);
rt_sendrecvscanline(scene->parbuf);
}
#endif
} /* end y-loop */
/* remember the next free ray serial number for this thread */
t->serialno = primary.serial + 1;
#if defined(_OPENMP)
if (local_mbox != NULL)
free(local_mbox);
#else
/* only free the mailbox if it was allocated here, not handed in */
if (t->local_mbox == NULL) {
if (local_mbox != NULL)
free(local_mbox);
}
#endif
if (scene->nodes == 1)
rt_thread_barrier(t->runbar, 1);
#if defined(_OPENMP)
}
#endif
return(NULL);
}
|
dpotri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpotri.c, normal z -> d, Fri Sep 28 17:38:09 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_potri
*
* Computes the inverse of a symmetric positive definite
* matrix A using the Cholesky factorization
* \f[ A = U^T \times U, \f]
* or
* \f[ A = L \times L^T. \f]
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular factor U or L from the Cholesky
* factorization A = U^T*U or A = L*L^T, as computed by
* plasma_dpotrf.
* On exit, the upper or lower triangle of the (symmetric)
* inverse of A, overwriting the input factor U or L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PLASMA_SUCCESS successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the (i,i) element of the factor U or L is
* zero, and the inverse could not be computed.
*
*******************************************************************************
*
* @sa plasma_cpotri
* @sa plasma_dpotri
* @sa plasma_spotri
*
******************************************************************************/
int plasma_dpotri(plasma_enum_t uplo,
                  int n,
                  double *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return (n was already validated as non-negative above)
    if (n == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trtri(plasma, PlasmaRealDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.
    // Bug fix: the return value was previously assigned and ignored;
    // on failure the descriptor must be released and the error returned.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    // Bug fix: same unchecked return value as above.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);

        // Perform computation.
        plasma_omp_dpotri(uplo, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_potri
*
* Computes the inverse of a complex symmetric
* positive definite matrix A using the Cholesky factorization
* A = U^T*U or A = L*L^T computed by plasma_dpotrf.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* On entry, the triangular factor U or L from the Cholesky
* factorization A = U^T*U or A = L*L^T, as computed by
* plasma_dpotrf.
* On exit, the upper or lower triangle of the (symmetric)
* inverse of A, overwriting the input factor U or L.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dpotri
* @sa plasma_omp_dpotri
* @sa plasma_omp_cpotri
* @sa plasma_omp_dpotri
* @sa plasma_omp_spotri
*
******************************************************************************/
void plasma_omp_dpotri(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // Bug fix: cannot report through sequence/request here, since they
        // have not been validated yet (they may be NULL).
        return;
    }

    // Bug fix: validate sequence and request BEFORE any call to
    // plasma_request_fail().  plasma_request_fail() writes through both
    // pointers, so the original code dereferenced NULL when reporting a
    // NULL sequence or NULL request.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0) {
        return;
    }

    // Invert triangular part.
    plasma_pdtrtri(uplo, PlasmaNonUnit, A, sequence, request);

    // Compute product of upper and lower triangle.
    plasma_pdlauum(uplo, A, sequence, request);
}
|
l8t1qa.c | #include<stdio.h>
#include<stdlib.h>
#include "gdal.h"
#include<omp.h>
/* L8 Qa bits [4] Cloud
* 0 -> class 0: Not Cloud
* 1 -> class 1: Cloud
*/
unsigned int L8QA_cloud(unsigned int pixel) {
    /* extract QA bit [4] (cloud flag): 0 = not cloud, 1 = cloud */
    return (pixel >> 4) & 0x01u;
}
/* L8 Qa bits [5-6] Cloud confidence
* 00 -> class 0: Not determined
* 01 -> class 1: No Cloud (0-33% probability)
* 10 -> class 2: Maybe Cloud (34-66% probability)
* 11 -> class 3: Cloud (66-100% probability)
*/
unsigned int L8QA_cloud_confidence(unsigned int pixel) {
    /* extract QA bits [5-6] (cloud confidence class 0-3) */
    return (pixel >> 5) & 0x03u;
}
/* L8 Qa bits [7-8] Cloud shadow confidence
* 00 -> class 0: Not determined
* 01 -> class 1: No Cloud shadow (0-33% probability)
* 10 -> class 2: Maybe Cloud shadow (34-66% probability)
* 11 -> class 3: Cloud shadow (66-100% probability)
*/
unsigned int L8QA_cloud_shadow(unsigned int pixel) {
    /* extract QA bits [7-8] (cloud-shadow confidence class 0-3) */
    return (pixel >> 7) & 0x03u;
}
/* L8 Qa bits [11-12] Cirrus confidence
* 00 -> class 0: Not determined
* 01 -> class 1: No Cirrus (0-33% probability)
* 10 -> class 2: Maybe Cirrus (34-66% probability)
* 11 -> class 3: Cirrus (66-100% probability)
*/
unsigned int L8QA_cirrus_confidence(unsigned int pixel) {
    /* extract QA bits [11-12] (cirrus confidence class 0-3) */
    return (pixel >> 11) & 0x03u;
}
/* usage() - print the command-line help text to stdout. */
void usage()
{
    /* every help line, emitted in order; none contains a '%' so the
       output is byte-identical to the former printf() sequence */
    static const char *help[] = {
        "-----------------------------------------\n",
        "--L8 Processing chain--OpenMP code----\n",
        "-----------------------------------------\n",
        "./L8 inL8B1 inL8B2 inL8B3 inL8b4 inL8b5 inL8b6 inL8B7 inL8_QA\n",
        "\toutL8vi outL8wi outL8LSWI outL8NBR2\n",
        "\toutL8B1 outL8B2 outL8B3 outL8b4 outL8b5 outL8b6 outL8B7\n",
        "-----------------------------------------\n",
        "inL8b1\t\tL8 Band 1 UInt16 (ShortBlue)\n",
        "inL8b2\t\tL8 Band 2 UInt16 (Blue)\n",
        "inL8b3\t\tL8 Band 3 UInt16 (Green)\n",
        "inL8b4\t\tL8 Band 4 UInt16 (Red)\n",
        "inL8b5\t\tL8 Band 5 UInt16 (NIR)\n",
        "inL8b6\t\tL8 Band 6 UInt16 (SWIR1)\n",
        "inL8b7\t\tL8 Band 7 UInt16 (SWIR2)\n",
        "inL8_QA\t\tL8_Qa UInt16\n",
        "outL8vi\tCloud removed L8 NDVI output [0-10000]\n",
        "outL8wi\tCloud removed L8 NDWI output [0-10000]\n",
        "outL8lswi\tCloud removed L8 LSWI output [0-10000]\n",
        "outL8nbr2\tCloud removed L8 NBR2 output [0-10000]\n",
        "inL8b1\t\tCloud removed L8 Band 1 UInt16 (ShortBlue)\n",
        "inL8b2\t\tCloud removed L8 Band 2 UInt16 (Blue)\n",
        "inL8b3\t\tCloud removed L8 Band 3 UInt16 (Green)\n",
        "inL8b4\t\tCloud removed L8 Band 4 UInt16 (Red)\n",
        "inL8b5\t\tCloud removed L8 Band 5 UInt16 (NIR)\n",
        "inL8b6\t\tCloud removed L8 Band 6 UInt16 (SWIR1)\n",
        "inL8b7\t\tCloud removed L8 Band 7 UInt16 (SWIR2)\n",
    };
    size_t i;
    for (i = 0; i < sizeof(help) / sizeof(help[0]); i++)
        fputs(help[i], stdout);
}
int main( int argc, char *argv[] )
{
if( argc < 9 ) {
usage();
return 1;
}
char *inB1 = argv[1]; //L8 band 1 (ShortBlue)
char *inB2 = argv[2]; //L8 band 2 (Blue)
char *inB3 = argv[3]; //L8 band 3 (Green)
char *inB4 = argv[4]; //L8 band 4 (Red)
char *inB5 = argv[5]; //L8 band 5 (NIR)
char *inB6 = argv[6]; //L8 band 6 (SWIR1)
char *inB7 = argv[7]; //L8 band 7 (SIWR2)
char *inB8 = argv[8]; //L8_QA
char *L8viF = argv[9]; //OUT NDVI
char *L8wiF = argv[10]; //OUT NDWI
char *L8lswiF= argv[11]; //OUT LSWI
char *L8nbr2F= argv[12]; //OUT NBR2
char *L8B1 = argv[13]; //Out L8 band 1 (ShortBlue)
char *L8B2 = argv[14]; //Out L8 band 2 (Blue)
char *L8B3 = argv[15]; //Out L8 band 3 (Green)
char *L8B4 = argv[16]; //Out L8 band 4 (Red)
char *L8B5 = argv[17]; //Out L8 band 5 (NIR)
char *L8B6 = argv[18]; //Out L8 band 6 (SWIR1)
char *L8B7 = argv[19]; //Out L8 band 7 (SIWR2)
GDALAllRegister();
GDALDatasetH hD1 = GDALOpen(inB1,GA_ReadOnly);//L8 band 1 (ShortBlue)
GDALDatasetH hD2 = GDALOpen(inB2,GA_ReadOnly);//L8 band 2 (Blue)
GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//L8 band 3 (Green)
GDALDatasetH hD4 = GDALOpen(inB4,GA_ReadOnly);//L8 band 4 (Red)
GDALDatasetH hD5 = GDALOpen(inB5,GA_ReadOnly);//L8 band 5 (NIR)
GDALDatasetH hD6 = GDALOpen(inB6,GA_ReadOnly);//L8 band 6 (SWIR1)
GDALDatasetH hD7 = GDALOpen(inB7,GA_ReadOnly);//L8 band 7 (SWIR2)
GDALDatasetH hD8 = GDALOpen(inB8,GA_ReadOnly);//L8_QA
if(hD1==NULL||hD2==NULL||hD3==NULL||hD4==NULL||hD5==NULL||
hD6==NULL||hD7==NULL||hD8==NULL){
printf("One or more input files ");
printf("could not be loaded\n");
exit(1);
}
GDALDriverH hDr1 = GDALGetDatasetDriver(hD1);
GDALDriverH hDr2 = GDALGetDatasetDriver(hD2);
GDALDriverH hDr3 = GDALGetDatasetDriver(hD3);
GDALDriverH hDr4 = GDALGetDatasetDriver(hD4);
GDALDriverH hDr5 = GDALGetDatasetDriver(hD5);
GDALDriverH hDr6 = GDALGetDatasetDriver(hD6);
GDALDriverH hDr7 = GDALGetDatasetDriver(hD7);
GDALDatasetH hDO1 = GDALCreateCopy(hDr1,L8B1,hD1,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO2 = GDALCreateCopy(hDr2,L8B2,hD2,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO3 = GDALCreateCopy(hDr3,L8B3,hD3,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO4 = GDALCreateCopy(hDr4,L8B4,hD4,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO5 = GDALCreateCopy(hDr5,L8B5,hD5,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO6 = GDALCreateCopy(hDr6,L8B6,hD6,FALSE,NULL,NULL,NULL);
GDALDatasetH hDO7 = GDALCreateCopy(hDr7,L8B7,hD7,FALSE,NULL,NULL,NULL);
GDALDatasetH hDOutVI = GDALCreateCopy(hDr4,L8viF,hD4,FALSE,NULL,NULL,NULL);
GDALDatasetH hDOutWI = GDALCreateCopy(hDr4,L8wiF,hD4,FALSE,NULL,NULL,NULL);
GDALDatasetH hDOutLSWI = GDALCreateCopy(hDr4,L8lswiF,hD4,FALSE,NULL,NULL,NULL);
GDALDatasetH hDOutNBR2 = GDALCreateCopy(hDr4,L8nbr2F,hD4,FALSE,NULL,NULL,NULL);
GDALRasterBandH hBOutVI = GDALGetRasterBand(hDOutVI,1);
GDALRasterBandH hBOutWI = GDALGetRasterBand(hDOutWI,1);
GDALRasterBandH hBOutLSWI = GDALGetRasterBand(hDOutLSWI,1);
GDALRasterBandH hBOutNBR2 = GDALGetRasterBand(hDOutNBR2,1);
GDALRasterBandH hB1 = GDALGetRasterBand(hD1,1);//L8 band 1 (ShortBlue)
GDALRasterBandH hB2 = GDALGetRasterBand(hD2,1);//L8 band 2 (Blue)
GDALRasterBandH hB3 = GDALGetRasterBand(hD3,1);//L8 band 3 (Green)
GDALRasterBandH hB4 = GDALGetRasterBand(hD4,1);//L8 band 4 (Red)
GDALRasterBandH hB5 = GDALGetRasterBand(hD5,1);//L8 band 5 (NIR)
GDALRasterBandH hB6 = GDALGetRasterBand(hD6,1);//L8 band 6 (SWIR1)
GDALRasterBandH hB7 = GDALGetRasterBand(hD7,1);//L8 band 7 (SWIR2)
GDALRasterBandH hB8 = GDALGetRasterBand(hD8,1);//L8_QA
GDALRasterBandH hBO1 = GDALGetRasterBand(hDO1,1);//out L8 band 1 (ShortBlue)
GDALRasterBandH hBO2 = GDALGetRasterBand(hDO2,1);//out L8 band 2 (Blue)
GDALRasterBandH hBO3 = GDALGetRasterBand(hDO3,1);//out L8 band 3 (Green)
GDALRasterBandH hBO4 = GDALGetRasterBand(hDO4,1);//out L8 band 4 (Red)
GDALRasterBandH hBO5 = GDALGetRasterBand(hDO5,1);//out L8 band 5 (NIR)
GDALRasterBandH hBO6 = GDALGetRasterBand(hDO6,1);//out L8 band 6 (SWIR1)
GDALRasterBandH hBO7 = GDALGetRasterBand(hDO7,1);//out L8 band 7 (SWIR2)
int nX = GDALGetRasterBandXSize(hB4);
int nY = GDALGetRasterBandYSize(hB4);
int N=nX*nY;
unsigned int *l1 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l2 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l3 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l4 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l5 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l6 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l7 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *l8 = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *lOutVI = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *lOutWI = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *lOutLSWI = (unsigned int *) malloc(sizeof(unsigned int)*N);
unsigned int *lOutNBR2 = (unsigned int *) malloc(sizeof(unsigned int)*N);
int rc, qac, qacc, qacs, qaci;
//L8 band 4/5/6/7 (red/NIR/SWIR1/SWIR2)
GDALRasterIO(hB1,GF_Read,0,0,nX,nY,l1,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB2,GF_Read,0,0,nX,nY,l2,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB3,GF_Read,0,0,nX,nY,l3,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB4,GF_Read,0,0,nX,nY,l4,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB5,GF_Read,0,0,nX,nY,l5,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB6,GF_Read,0,0,nX,nY,l6,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hB7,GF_Read,0,0,nX,nY,l7,nX,nY,GDT_UInt32,0,0);
//L8_QA
GDALRasterIO(hB8,GF_Read,0,0,nX,nY,l8,nX,nY,GDT_UInt32,0,0);
//Get the number of threads available
int n = omp_get_num_threads();
//Do not stall the computer
omp_set_num_threads(n-1);
#pragma omp parallel for default(none) \
private (rc, qac, qacc, qacs, qaci) \
shared (N,l1,l2,l3,l4,l5,l6,l7,l8,lOutVI,lOutWI,lOutLSWI,lOutNBR2)
for(rc=0;rc<N;rc++){
/*process QAs*/
/*QA cloud: bit 4*/
qac=L8QA_cloud(l8[rc]);
/*QA cloud confidence: bits 5-6*/
qacc=L8QA_cloud_confidence(l8[rc]);
/*QA cloud shadow: bits 7-8*/
qacs=L8QA_cloud_shadow(l8[rc]);
/*QA cirrus confidence: bits 11-12*/
qaci=L8QA_cirrus_confidence(l8[rc]);
/*No Data in this pixel: [UInt16 val == 1] => -32768*/
if(l8[rc]==1){
l1[rc] = 32768;
l2[rc] = 32768;
l3[rc] = 32768;
l4[rc] = 32768;
l5[rc] = 32768;
l6[rc] = 32768;
l7[rc] = 32768;
lOutVI[rc] = 32768;
lOutWI[rc] = 32768;
lOutLSWI[rc] = 32768;
lOutNBR2[rc] = 32768;
/*If clouds, or cloud[shadow][cirrus] confidence QA==[00,01]->[0,1] then mask the pixel*/
}else if(qac == 1 || qacc > 2 || qacs > 2 || qaci > 2){
l1[rc] = 32768;
l2[rc] = 32768;
l3[rc] = 32768;
l4[rc] = 32768;
l5[rc] = 32768;
l6[rc] = 32768;
l7[rc] = 32768;
lOutVI[rc] = 32767;
lOutWI[rc] = 32767;
lOutLSWI[rc] = 32767;
lOutNBR2[rc] = 32767;
/*Finally, all sufficiently less cloud confident or not cloud for sure, use the band pixel value*/
}else{
/*process NDVI*/
if((l5[rc]+l4[rc])==0){
lOutVI[rc]=32768;
}else{
lOutVI[rc]=(int)(10000.0*l5[rc]-l4[rc])/(1.0*l5[rc]+l4[rc]);
}
/*process NDWI*/
if((l6[rc]+l5[rc])==0){
lOutWI[rc]=32768;
}else{
lOutWI[rc]=(int)(10000.0*l6[rc]-l5[rc])/(1.0*l6[rc]+l5[rc]);
}
/*process LSWI*/
if((l6[rc]+l5[rc])==0){
lOutLSWI[rc]=32768;
}else{
lOutLSWI[rc]=(int)(10000.0*l5[rc]-l6[rc])/(1.0*l5[rc]+l6[rc]);
}
/*process NBR2*/
if((l6[rc]+l7[rc])==0){
lOutNBR2[rc]=32768;
}else{
lOutNBR2[rc]=(int)(10000.0*l6[rc]-l7[rc])/(1.0*l6[rc]+l7[rc]);
}
}
}
#pragma omp barrier
GDALRasterIO(hBO1,GF_Write,0,0,nX,nY,l1,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO2,GF_Write,0,0,nX,nY,l2,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO3,GF_Write,0,0,nX,nY,l3,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO4,GF_Write,0,0,nX,nY,l4,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO5,GF_Write,0,0,nX,nY,l5,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO6,GF_Write,0,0,nX,nY,l6,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBO7,GF_Write,0,0,nX,nY,l7,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBOutVI,GF_Write,0,0,nX,nY,lOutVI,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBOutWI,GF_Write,0,0,nX,nY,lOutWI,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBOutLSWI,GF_Write,0,0,nX,nY,lOutLSWI,nX,nY,GDT_UInt32,0,0);
GDALRasterIO(hBOutNBR2,GF_Write,0,0,nX,nY,lOutNBR2,nX,nY,GDT_UInt32,0,0);
if( l1 != NULL ) free( l1 );
if( l2 != NULL ) free( l2 );
if( l3 != NULL ) free( l3 );
if( l4 != NULL ) free( l4 );
if( l5 != NULL ) free( l5 );
if( l6 != NULL ) free( l6 );
if( l7 != NULL ) free( l7 );
if( l8 != NULL ) free( l8 );
GDALClose(hDO1);
GDALClose(hDO2);
GDALClose(hDO3);
GDALClose(hDO4);
GDALClose(hDO5);
GDALClose(hDO6);
GDALClose(hDO7);
GDALClose(hD1);
GDALClose(hD2);
GDALClose(hD3);
GDALClose(hD4);
GDALClose(hD5);
GDALClose(hD6);
GDALClose(hD7);
GDALClose(hD8);
GDALClose(hDOutVI);
GDALClose(hDOutWI);
GDALClose(hDOutLSWI);
GDALClose(hDOutNBR2);
return(EXIT_SUCCESS);
}
|
annotate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA N N N N OOO TTTTT AAA TTTTT EEEEE %
% A A NN N NN N O O T A A T E %
% AAAAA N N N N N N O O T AAAAA T EEE %
% A A N NN N NN O O T A A T E %
% A A N N N N OOO T A A T EEEEE %
% %
% %
% MagickCore Image Annotation Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Digital Applications (www.digapp.com) contributed the stroked text algorithm.
% It was written by Leonard Rosenthol.
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/transform.h"
#include "magick/type.h"
#include "magick/utility.h"
#include "magick/xwindow-private.h"
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
#if defined(__MINGW32__)
# undef interface
#endif
#if defined(MAGICKCORE_HAVE_FT2BUILD_H)
# include <ft2build.h>
#endif
#if defined(FT_FREETYPE_H)
# include FT_FREETYPE_H
#else
# include <freetype/freetype.h>
#endif
#if defined(FT_GLYPH_H)
# include FT_GLYPH_H
#else
# include <freetype/ftglyph.h>
#endif
#if defined(FT_OUTLINE_H)
# include FT_OUTLINE_H
#else
# include <freetype/ftoutln.h>
#endif
#if defined(FT_BBOX_H)
# include FT_BBOX_H
#else
# include <freetype/ftbbox.h>
#endif /* defined(FT_BBOX_H) */
#endif
/*
Forward declarations.
*/
static MagickBooleanType
RenderType(Image *,const DrawInfo *,const PointInfo *,TypeMetric *),
RenderPostscript(Image *,const DrawInfo *,const PointInfo *,TypeMetric *),
RenderFreetype(Image *,const DrawInfo *,const char *,const PointInfo *,
TypeMetric *),
RenderX11(Image *,const DrawInfo *,const PointInfo *,TypeMetric *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A n n o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AnnotateImage() annotates an image with text. Optionally you can include
% any of the following bits of information about the image by embedding
% the appropriate special characters:
%
% %b file size in bytes.
% %c comment.
% %d directory in which the image resides.
% %e extension of the image file.
% %f original filename of the image.
% %h height of image.
% %i filename of the image.
% %k number of unique colors.
% %l image label.
% %m image file format.
% %n number of images in an image sequence.
% %o output image filename.
% %p page number of the image.
% %q image depth (8 or 16).
% %s image scene number.
% %t image filename without any extension.
% %u a unique temporary filename.
% %w image width.
% %x x resolution of the image.
% %y y resolution of the image.
%
% The format of the AnnotateImage method is:
%
% MagickBooleanType AnnotateImage(Image *image,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
MagickExport MagickBooleanType AnnotateImage(Image *image,
  const DrawInfo *draw_info)
{
  char
    primitive[MaxTextExtent],
    **textlist;

  DrawInfo
    *annotate,
    *annotate_info;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  PointInfo
    offset;

  RectangleInfo
    geometry;

  register long
    i;

  TypeMetric
    metrics;

  unsigned long
    height,
    number_lines;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if (draw_info->text == (char *) NULL)
    return(MagickFalse);
  if (*draw_info->text == '\0')
    return(MagickTrue);
  /*
    Break the annotation text into individual lines and count them.
  */
  textlist=StringToList(draw_info->text);
  if (textlist == (char **) NULL)
    return(MagickFalse);
  for (i=1; textlist[i] != (char *) NULL; i++) ;
  number_lines=(unsigned long) i;
  annotate=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  SetGeometry(image,&geometry);
  SetGeometryInfo(&geometry_info);
  if (annotate_info->geometry != (char *) NULL)
    {
      (void) ParsePageGeometry(image,annotate_info->geometry,&geometry,
        &image->exception);
      (void) ParseGeometry(annotate_info->geometry,&geometry_info);
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      /*
        Fix: release the cloned draw infos and the text list before the
        error return -- these were previously leaked on this path.
      */
      annotate_info=DestroyDrawInfo(annotate_info);
      annotate=DestroyDrawInfo(annotate);
      for (i=0; textlist[i] != (char *) NULL; i++)
        textlist[i]=DestroyString(textlist[i]);
      textlist=(char **) RelinquishMagickMemory(textlist);
      return(MagickFalse);
    }
  status=MagickTrue;
  for (i=0; textlist[i] != (char *) NULL; i++)
  {
    /*
      Position text relative to image.
    */
    annotate_info->affine.tx=geometry_info.xi-image->page.x;
    annotate_info->affine.ty=geometry_info.psi-image->page.y;
    (void) CloneString(&annotate->text,textlist[i]);
    (void) GetTypeMetrics(image,annotate,&metrics);
    height=(unsigned long) (metrics.ascent-metrics.descent+0.5);
    /*
      Compute the text offset implied by the requested gravity; each case
      accounts for the affine transform and the per-line vertical advance.
    */
    switch (annotate->gravity)
    {
      case UndefinedGravity:
      default:
      {
        offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height;
        offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height;
        break;
      }
      case NorthWestGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
          annotate_info->affine.ry*height+annotate_info->affine.ry*
          (metrics.ascent+metrics.descent);
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
          annotate_info->affine.sy*height+annotate_info->affine.sy*
          metrics.ascent;
        break;
      }
      case NorthGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
          geometry.width/2.0+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)/2.0+
          annotate_info->affine.ry*(metrics.ascent+metrics.descent);
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
          annotate_info->affine.sy*height+annotate_info->affine.sy*
          metrics.ascent-annotate_info->affine.rx*(metrics.width-
          metrics.bounds.x1)/2.0;
        break;
      }
      case NorthEastGravity:
      {
        offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
          geometry.width+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)+
          annotate_info->affine.ry*(metrics.ascent+metrics.descent);
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
          annotate_info->affine.sy*height+annotate_info->affine.sy*
          metrics.ascent-annotate_info->affine.rx*(metrics.width-
          metrics.bounds.x1);
        break;
      }
      case WestGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
          annotate_info->affine.ry*height+annotate_info->affine.ry*
          (metrics.ascent+metrics.descent-(number_lines-1.0)*height)/2.0;
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
          geometry.height/2.0+i*annotate_info->affine.sy*height+
          annotate_info->affine.sy*(metrics.ascent+metrics.descent-
          (number_lines-1.0)*height)/2.0;
        break;
      }
      case StaticGravity:
      case CenterGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
          geometry.width/2.0+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)/2.0+
          annotate_info->affine.ry*(metrics.ascent+metrics.descent-
          (number_lines-1)*height)/2.0;
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
          geometry.height/2.0+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1)/2.0+
          annotate_info->affine.sy*(metrics.ascent+metrics.descent-
          (number_lines-1.0)*height)/2.0;
        break;
      }
      case EastGravity:
      {
        offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
          geometry.width+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)+
          annotate_info->affine.ry*(metrics.ascent+metrics.descent-
          (number_lines-1.0)*height)/2.0;
        offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
          geometry.height/2.0+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1)+
          annotate_info->affine.sy*(metrics.ascent+metrics.descent-
          (number_lines-1.0)*height)/2.0;
        break;
      }
      case SouthWestGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
          annotate_info->affine.ry*height-annotate_info->affine.ry*
          (number_lines-1.0)*height;
        offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
          geometry.height+i*annotate_info->affine.sy*height-
          annotate_info->affine.sy*(number_lines-1.0)*height+metrics.descent;
        break;
      }
      case SouthGravity:
      {
        offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
          geometry.width/2.0+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)/2.0-
          annotate_info->affine.ry*(number_lines-1.0)*height/2.0;
        offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
          geometry.height+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1)/2.0-
          annotate_info->affine.sy*(number_lines-1.0)*height+metrics.descent;
        break;
      }
      case SouthEastGravity:
      {
        offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
          geometry.width+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)-
          annotate_info->affine.ry*(number_lines-1.0)*height;
        offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
          geometry.height+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1)-
          annotate_info->affine.sy*(number_lines-1.0)*height+metrics.descent;
        break;
      }
    }
    /*
      Justification overrides the gravity-derived offset.
    */
    switch (annotate->align)
    {
      case LeftAlign:
      {
        offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height;
        offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height;
        break;
      }
      case CenterAlign:
      {
        offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1)/2.0;
        offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1)/2.0;
        break;
      }
      case RightAlign:
      {
        offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height-
          annotate_info->affine.sx*(metrics.width+metrics.bounds.x1);
        offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height-
          annotate_info->affine.rx*(metrics.width+metrics.bounds.x1);
        break;
      }
      default:
        break;
    }
    if (draw_info->undercolor.opacity != TransparentOpacity)
      {
        DrawInfo
          *undercolor_info;

        /*
          Text box: fill a rectangle behind the line with the undercolor.
        */
        undercolor_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
        undercolor_info->fill=draw_info->undercolor;
        undercolor_info->affine=draw_info->affine;
        undercolor_info->affine.tx=offset.x-draw_info->affine.ry*metrics.ascent;
        undercolor_info->affine.ty=offset.y-draw_info->affine.sy*metrics.ascent;
        /*
          Fix: height is unsigned long, so the matching conversion
          specification is %lu (%ld was undefined behavior).
        */
        (void) FormatMagickString(primitive,MaxTextExtent,
          "rectangle 0,0 %g,%lu",metrics.origin.x,height);
        (void) CloneString(&undercolor_info->primitive,primitive);
        (void) DrawImage(image,undercolor_info);
        (void) DestroyDrawInfo(undercolor_info);
      }
    annotate_info->affine.tx=offset.x;
    annotate_info->affine.ty=offset.y;
    /*
      Shared primitive for overline/underline/line-through decorations.
    */
    (void) FormatMagickString(primitive,MaxTextExtent,"stroke-width %g "
      "line 0,0 %g,0",metrics.underline_thickness,metrics.width);
    if (annotate->decorate == OverlineDecoration)
      {
        annotate_info->affine.ty-=(draw_info->affine.sy*(metrics.ascent+
          metrics.descent-metrics.underline_position));
        (void) CloneString(&annotate_info->primitive,primitive);
        (void) DrawImage(image,annotate_info);
      }
    else
      if (annotate->decorate == UnderlineDecoration)
        {
          annotate_info->affine.ty-=(draw_info->affine.sy*
            metrics.underline_position);
          (void) CloneString(&annotate_info->primitive,primitive);
          (void) DrawImage(image,annotate_info);
        }
    /*
      Annotate image with text.
    */
    status=RenderType(image,annotate,&offset,&metrics);
    if (status == MagickFalse)
      break;
    /*
      Line-through is drawn after the text so it sits on top of the glyphs.
    */
    if (annotate->decorate == LineThroughDecoration)
      {
        annotate_info->affine.ty-=(draw_info->affine.sy*(height+
          metrics.underline_position+metrics.descent)/2.0);
        (void) CloneString(&annotate_info->primitive,primitive);
        (void) DrawImage(image,annotate_info);
      }
  }
  /*
    Relinquish resources.
  */
  annotate_info=DestroyDrawInfo(annotate_info);
  annotate=DestroyDrawInfo(annotate);
  for (i=0; textlist[i] != (char *) NULL; i++)
    textlist[i]=DestroyString(textlist[i]);
  textlist=(char **) RelinquishMagickMemory(textlist);
  return(status);
}
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E n c o d e S J I S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeSJIS() converts an ASCII text string to 2-bytes per character code
% (like UCS-2). Returns the translated codes and the character count.
% Characters under 0x7f are just copied, characters over 0x80 are tied with
% the next character.
%
% Katsutoshi Shibuya contributed this method.
%
% The format of the EncodeSJIS function is:
%
% encoding=EncodeSJIS(const Image *image,const char *text,size_t count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o text: the text.
%
% o count: return the number of characters generated by the encoding.
%
*/
static int GetOneCharacter(const unsigned char *text,size_t *length)
{
  /*
    Decode one SJIS-style character from text.  On entry *length holds the
    number of bytes available; on return it holds the number consumed (1 for
    ASCII, 2 for a double-byte character, 0 on a truncated double-byte
    sequence).  Returns the character code, or -1 on failure.
  */
  int
    code;

  if (*length == 0)
    return(-1);
  code=(int) text[0];
  if ((code & 0x80) == 0)
    {
      /*
        Single-byte (ASCII) character.
      */
      *length=1;
      return(code);
    }
  if (*length <= 1)
    {
      /*
        High bit set but no trail byte available: truncated sequence.
      */
      *length=0;
      return(-1);
    }
  /*
    Double-byte character: lead byte in the high 8 bits, trail in the low.
  */
  *length=2;
  return(((int) text[0] << 8) | (int) text[1]);
}
static unsigned long *EncodeSJIS(const char *text,size_t *count)
{
  /*
    Convert a C string to an array of SJIS character codes; *count receives
    the number of codes produced.  If any sequence fails to decode, the
    whole string is re-encoded as a raw byte-per-code copy.  Returns NULL
    on empty input, length overflow, or allocation failure; the caller owns
    and must free the returned buffer.
  */
  register const char
    *s;

  register unsigned long
    *out;

  size_t
    extent;

  unsigned long
    *codes;

  *count=0;
  if ((text == (char *) NULL) || (*text == '\0'))
    return((unsigned long *) NULL);
  extent=strlen(text);
  if (~extent < MaxTextExtent)
    return((unsigned long *) NULL);  /* extent+MaxTextExtent would overflow */
  extent+=MaxTextExtent;
  codes=(unsigned long *) AcquireQuantumMemory(extent,sizeof(*codes));
  if (codes == (unsigned long *) NULL)
    return((unsigned long *) NULL);
  out=codes;
  s=text;
  while (*s != '\0')
  {
    int
      c;

    size_t
      consumed;

    consumed=strlen(s);
    c=GetOneCharacter((const unsigned char *) s,&consumed);
    if (c < 0)
      {
        /*
          Malformed sequence: restart with a raw byte-for-byte copy.
        */
        out=codes;
        for (s=text; *s != '\0'; s++)
          *out++=(unsigned long) ((unsigned char) *s);
        break;
      }
    *out++=(unsigned long) c;
    s+=consumed;
  }
  *count=(size_t) (out-codes);
  return(codes);
}
#endif
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E n c o d e T e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeText() converts an ASCII text string to wide text and returns the
% translation and the character count.
%
% The format of the EncodeText function is:
%
% encoding=EncodeText(const char *text,size_t count)
%
% A description of each parameter follows:
%
% o text: the text.
%
% o count: return the number of characters generated by the encoding.
%
*/
static unsigned long *EncodeText(const char *text,size_t *count)
{
  /*
    Widen a byte string into an array of unsigned long codes, one code per
    byte; *count receives the character count.  Returns NULL on empty
    input, length overflow, or allocation failure; the caller owns and must
    free the returned buffer.
  */
  register const char
    *s;

  register unsigned long
    *out;

  size_t
    extent;

  unsigned long
    *codes;

  *count=0;
  if ((text == (char *) NULL) || (*text == '\0'))
    return((unsigned long *) NULL);
  extent=strlen(text);
  if (~extent < MaxTextExtent)
    return((unsigned long *) NULL);  /* extent+MaxTextExtent would overflow */
  extent+=MaxTextExtent;
  codes=(unsigned long *) AcquireQuantumMemory(extent,sizeof(*codes));
  if (codes == (unsigned long *) NULL)
    return((unsigned long *) NULL);
  out=codes;
  for (s=text; *s != '\0'; s++)
    *out++=(unsigned long) ((unsigned char) *s);
  *count=(size_t) (out-codes);
  return(codes);
}
#endif
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E n c o d e U n i c o d e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeUnicode() converts an ASCII text string to Unicode and returns the
% Unicode translation and the character count. Characters under 0x7f are
% just copied, characters over 0x80 are tied with the next character.
%
% The format of the EncodeUnicode function is:
%
% unsigned short *EncodeUnicode(const unsigned char *text,size_t count)
%
% A description of each parameter follows:
%
% o unicode: EncodeUnicode() returns a pointer to an unsigned short array
% representing the encoded version of the ASCII string.
%
% o text: the text.
%
% o count: return the number of characters generated by the encoding.
%
*/
static long GetUnicodeCharacter(const unsigned char *text,size_t *length)
{
  /*
    Decode a single UTF-8 sequence.  On entry *length holds the bytes
    available; on return it holds the bytes consumed (1-4), or 0 when the
    sequence is truncated or has a bad continuation byte.  Returns the code
    point, or -1 on failure.  NOTE(review): this decoder is deliberately
    lenient -- it does not reject overlong encodings or stray continuation
    lead bytes; that behavior is preserved here.
  */
  unsigned long
    code;

  if (*length == 0)
    return(-1);
  code=(unsigned long) text[0];
  if ((code & 0x80) == 0)
    {
      /*
        One-byte (ASCII) sequence.
      */
      *length=1;
      return((long) code);
    }
  if ((*length < 2) || ((text[1] & 0xc0) != 0x80))
    {
      *length=0;
      return(-1);
    }
  if ((code & 0xe0) != 0xe0)
    {
      /*
        Two-byte sequence: 5 + 6 payload bits.
      */
      *length=2;
      return((long) (((code & 0x1f) << 6) | (unsigned long) (text[1] & 0x3f)));
    }
  if ((*length < 3) || ((text[2] & 0xc0) != 0x80))
    {
      *length=0;
      return(-1);
    }
  if ((code & 0xf0) != 0xf0)
    {
      /*
        Three-byte sequence: 4 + 6 + 6 payload bits.
      */
      *length=3;
      return((long) (((code & 0x0f) << 12) |
        ((unsigned long) (text[1] & 0x3f) << 6) |
        (unsigned long) (text[2] & 0x3f)));
    }
  if ((*length < 4) || ((code & 0xf8) != 0xf0) || ((text[3] & 0xc0) != 0x80))
    {
      *length=0;
      return(-1);
    }
  /*
    Four-byte sequence: 3 + 6 + 6 + 6 payload bits.
  */
  *length=4;
  code=((code & 0x07) << 18) |
    ((unsigned long) (text[1] & 0x3f) << 12) |
    ((unsigned long) (text[2] & 0x3f) << 6) |
    (unsigned long) (text[3] & 0x3f);
  return((long) code);
}
static unsigned long *EncodeUnicode(const char *text,size_t *count)
{
  /*
    Decode a UTF-8 string into an array of Unicode code points; *count
    receives the number of code points.  If any sequence fails to decode,
    the whole string is re-encoded as a raw byte-per-code copy.  Returns
    NULL on empty input, length overflow, or allocation failure; the caller
    owns and must free the returned buffer.
  */
  long
    c;

  register const char
    *s;

  register unsigned long
    *out;

  size_t
    extent;

  unsigned long
    *unicode;

  *count=0;
  if ((text == (char *) NULL) || (*text == '\0'))
    return((unsigned long *) NULL);
  extent=strlen(text);
  if (~extent < MaxTextExtent)
    return((unsigned long *) NULL);  /* extent+MaxTextExtent would overflow */
  extent+=MaxTextExtent;
  unicode=(unsigned long *) AcquireQuantumMemory(extent,sizeof(*unicode));
  if (unicode == (unsigned long *) NULL)
    return((unsigned long *) NULL);
  out=unicode;
  s=text;
  while (*s != '\0')
  {
    size_t
      consumed;

    consumed=strlen(s);
    c=GetUnicodeCharacter((const unsigned char *) s,&consumed);
    if (c < 0)
      {
        /*
          Invalid UTF-8: restart with a raw byte-for-byte copy.
        */
        out=unicode;
        for (s=text; *s != '\0'; s++)
          *out++=(unsigned long) ((unsigned char) *s);
        break;
      }
    *out++=(unsigned long) c;
    s+=consumed;
  }
  *count=(size_t) (out-unicode);
  return(unicode);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t M a g i c k C a p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatMagickCaption() formats a caption so that it fits within the image
% width. It returns the number of lines in the formatted caption.
%
% The format of the FormatMagickCaption method is:
%
% long FormatMagickCaption(Image *image,DrawInfo *draw_info,char *caption,
% TypeMetric *metrics)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o draw_info: the draw info.
%
% o metrics: Return the font metrics in this structure.
%
*/
MagickExport long FormatMagickCaption(Image *image,DrawInfo *draw_info,
  char *caption,TypeMetric *metrics)
{
  /*
    Word-wrap caption in place so each rendered line fits within
    image->columns, overwriting the chosen break character with '\n'.
    Returns the number of newlines in the formatted caption.
    NOTE(review): candidate lines are staged into draw_info->text through q
    with no bounds check -- assumes draw_info->text is at least as large as
    caption; confirm with callers.
  */
  MagickBooleanType
    status;

  register char
    *p,
    *q,
    *s;

  register long
    i;

  unsigned long
    width;

  q=draw_info->text;
  s=(char *) NULL;
  for (p=caption; *p != '\0'; p++)
  {
    /* Remember the most recent whitespace as a candidate break point. */
    if (isspace((int) ((unsigned char) *p)) != 0)
      s=p;
    /* Append the character and measure the line accumulated so far. */
    *q++=(*p);
    *q='\0';
    status=GetTypeMetrics(image,draw_info,metrics);
    if (status == MagickFalse)
      break;
    width=(unsigned long) (metrics->width+0.5);
    if (*p != '\n')
      if (width <= image->columns)
        continue;
    /*
      Line too wide (or explicit newline): break at the last seen space,
      or, if none, at the next space ahead.
    */
    if (s == (char *) NULL)
      {
        s=p;
        while ((isspace((int) ((unsigned char) *s)) == 0) && (*s != '\0'))
          s++;
      }
    if (*s != '\0')
      {
        *s='\n';
        p=s;
        s=(char *) NULL;
      }
    /* Start measuring the next line from scratch. */
    q=draw_info->text;
  }
  /* Report the number of line breaks now present in caption. */
  i=0;
  for (p=caption; *p != '\0'; p++)
    if (*p == '\n')
      i++;
  return(i);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M u l t i l i n e T y p e M e t r i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMultilineTypeMetrics() returns the following information for the
% specified font and text:
%
% character width
% character height
% ascender
% descender
% text width
% text height
% maximum horizontal advance
% bounds: x1
% bounds: y1
% bounds: x2
% bounds: y2
% origin: x
% origin: y
% underline position
% underline thickness
%
% This method is like GetTypeMetrics() but it returns the maximum text width
% and height for multiple lines of text.
%
% The format of the GetMultilineTypeMetrics method is:
%
% MagickBooleanType GetMultilineTypeMetrics(Image *image,
% const DrawInfo *draw_info,TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o metrics: Return the font metrics in this structure.
%
*/
MagickExport MagickBooleanType GetMultilineTypeMetrics(Image *image,
  const DrawInfo *draw_info,TypeMetric *metrics)
{
  /*
    Like GetTypeMetrics(), but for multi-line text: returns the metrics of
    the widest line, with height scaled to cover all lines.
  */
  char
    **textlist;

  DrawInfo
    *annotate_info;

  MagickBooleanType
    status;

  register long
    i;

  TypeMetric
    extent;

  unsigned long
    number_lines;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->text != (char *) NULL);
  assert(draw_info->signature == MagickSignature);
  if (*draw_info->text == '\0')
    return(MagickFalse);
  annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  annotate_info->text=DestroyString(annotate_info->text);
  /*
    Convert newlines to multiple lines of text.
  */
  textlist=StringToList(draw_info->text);
  if (textlist == (char **) NULL)
    {
      /*
        Fix: destroy the cloned draw info on this error path (it was
        previously leaked).
      */
      annotate_info=DestroyDrawInfo(annotate_info);
      return(MagickFalse);
    }
  annotate_info->render=MagickFalse;
  (void) ResetMagickMemory(metrics,0,sizeof(*metrics));
  (void) ResetMagickMemory(&extent,0,sizeof(extent));
  /*
    Find the widest of the text lines.
  */
  annotate_info->text=textlist[0];
  status=GetTypeMetrics(image,annotate_info,&extent);
  *metrics=extent;
  for (i=1; textlist[i] != (char *) NULL; i++)
  {
    annotate_info->text=textlist[i];
    status=GetTypeMetrics(image,annotate_info,&extent);
    if (extent.width > metrics->width)
      *metrics=extent;
  }
  number_lines=(unsigned long) i;
  /*
    Total height is the rounded line height times the number of lines.
  */
  metrics->height=(double) number_lines*(long) (metrics->ascent-
    metrics->descent+0.5);
  /*
    Relinquish resources.  The line strings are owned by textlist, so clear
    annotate_info->text before destroying the clone.
  */
  annotate_info->text=(char *) NULL;
  annotate_info=DestroyDrawInfo(annotate_info);
  for (i=0; textlist[i] != (char *) NULL; i++)
    textlist[i]=DestroyString(textlist[i]);
  textlist=(char **) RelinquishMagickMemory(textlist);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T y p e M e t r i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetTypeMetrics() returns the following information for the specified font
% and text:
%
% character width
% character height
% ascender
% descender
% text width
% text height
% maximum horizontal advance
% bounds: x1
% bounds: y1
% bounds: x2
% bounds: y2
% origin: x
% origin: y
% underline position
% underline thickness
%
% The format of the GetTypeMetrics method is:
%
% MagickBooleanType GetTypeMetrics(Image *image,const DrawInfo *draw_info,
% TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o metrics: Return the font metrics in this structure.
%
*/
MagickExport MagickBooleanType GetTypeMetrics(Image *image,
  const DrawInfo *draw_info,TypeMetric *metrics)
{
  /*
    Measure draw_info->text without rendering: clone the draw info with
    rendering disabled and let RenderType() fill in the metrics at a zero
    origin.  Returns the render status.
  */
  DrawInfo
    *clone_info;

  MagickBooleanType
    status;

  PointInfo
    origin;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->text != (char *) NULL);
  assert(draw_info->signature == MagickSignature);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->render=MagickFalse;
  (void) ResetMagickMemory(metrics,0,sizeof(*metrics));
  origin.x=0.0;
  origin.y=0.0;
  status=RenderType(image,clone_info,&origin,metrics);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(AnnotateEvent,GetMagickModule(),"Metrics: text: %s; "
      "width: %g; height: %g; ascent: %g; descent: %g; max advance: %g; "
      "bounds: %g,%g %g,%g; origin: %g,%g; pixels per em: %g,%g; "
      "underline position: %g; underline thickness: %g",clone_info->text,
      metrics->width,metrics->height,metrics->ascent,metrics->descent,
      metrics->max_advance,metrics->bounds.x1,metrics->bounds.y1,
      metrics->bounds.x2,metrics->bounds.y2,metrics->origin.x,metrics->origin.y,
      metrics->pixels_per_em.x,metrics->pixels_per_em.y,
      metrics->underline_position,metrics->underline_thickness);
  clone_info=DestroyDrawInfo(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e n d e r T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RenderType() renders text on the image. It also returns the bounding box of
% the text relative to the image.
%
% The format of the RenderType method is:
%
% MagickBooleanType RenderType(Image *image,DrawInfo *draw_info,
% const PointInfo *offset,TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o offset: (x,y) location of text relative to image.
%
% o metrics: bounding box of text.
%
*/
static MagickBooleanType RenderType(Image *image,const DrawInfo *draw_info,
  const PointInfo *offset,TypeMetric *metrics)
{
  /*
    Resolve the requested font and dispatch to the matching renderer.
    Resolution order: explicit font ('@file' or readable path -> Freetype,
    '-...' -> X11, otherwise registered type info), then font family, then
    the "arial"/"helvetica" families, then any available family, and
    finally Freetype with the raw draw_info as a last resort.
  */
  const TypeInfo
    *type_info;

  DrawInfo
    *annotate_info;

  MagickBooleanType
    status;

  type_info=(const TypeInfo *) NULL;
  if (draw_info->font != (char *) NULL)
    {
      /* '@' prefix forces a Freetype font file; '-' denotes an X11 font. */
      if (*draw_info->font == '@')
        return(RenderFreetype(image,draw_info,draw_info->encoding,offset,metrics));
      if (*draw_info->font == '-')
        return(RenderX11(image,draw_info,offset,metrics));
      /* A readable path is treated as a font file directly. */
      if (IsPathAccessible(draw_info->font) != MagickFalse)
        return(RenderFreetype(image,draw_info,draw_info->encoding,offset,metrics));
      type_info=GetTypeInfo(draw_info->font,&image->exception);
      if (type_info == (const TypeInfo *) NULL)
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          TypeWarning,"UnableToReadFont","`%s'",draw_info->font);
    }
  /* Fall back to a family lookup when the font name did not resolve. */
  if ((type_info == (const TypeInfo *) NULL) &&
      (draw_info->family != (const char *) NULL))
    {
      type_info=GetTypeInfoByFamily(draw_info->family,draw_info->style,
        draw_info->stretch,draw_info->weight,&image->exception);
      if (type_info == (const TypeInfo *) NULL)
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          TypeWarning,"UnableToReadFont","`%s'",draw_info->family);
    }
  /* Default families, then any family at all. */
  if (type_info == (const TypeInfo *) NULL)
    type_info=GetTypeInfoByFamily("arial",draw_info->style,
      draw_info->stretch,draw_info->weight,&image->exception);
  if (type_info == (const TypeInfo *) NULL)
    type_info=GetTypeInfoByFamily("helvetica",draw_info->style,
      draw_info->stretch,draw_info->weight,&image->exception);
  if (type_info == (const TypeInfo *) NULL)
    type_info=GetTypeInfoByFamily((const char *) NULL,draw_info->style,
      draw_info->stretch,draw_info->weight,&image->exception);
  if (type_info == (const TypeInfo *) NULL)
    {
      /* No type info at all: let Freetype try the raw draw_info. */
      status=RenderFreetype(image,draw_info,draw_info->encoding,offset,
        metrics);
      return(status);
    }
  /* Render with the resolved face, metrics file, and glyph file. */
  annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  annotate_info->face=type_info->face;
  if (type_info->metrics != (char *) NULL)
    (void) CloneString(&annotate_info->metrics,type_info->metrics);
  if (type_info->glyphs != (char *) NULL)
    (void) CloneString(&annotate_info->font,type_info->glyphs);
  status=RenderFreetype(image,annotate_info,type_info->encoding,offset,metrics);
  annotate_info=DestroyDrawInfo(annotate_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e n d e r F r e e t y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RenderFreetype() renders text on the image with a Truetype font. It also
% returns the bounding box of the text relative to the image.
%
% The format of the RenderFreetype method is:
%
% MagickBooleanType RenderFreetype(Image *image,DrawInfo *draw_info,
% const char *encoding,const PointInfo *offset,TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o encoding: the font encoding.
%
% o offset: (x,y) location of text relative to image.
%
% o metrics: bounding box of text.
%
*/
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
static int TraceCubicBezier(FT_Vector *p,FT_Vector *q,FT_Vector *to,
  DrawInfo *draw_info)
{
  /*
    Freetype outline callback: append a cubic Bezier segment (control
    points p and q, endpoint to, 26.6 fixed-point units) to the draw
    primitive path, flipping y into image coordinates.
  */
  char
    path[MaxTextExtent];

  const AffineMatrix
    *affine;

  affine=(&draw_info->affine);
  (void) FormatMagickString(path,MaxTextExtent,"C%g,%g %g,%g %g,%g",
    affine->tx+p->x/64.0,affine->ty-p->y/64.0,affine->tx+q->x/64.0,
    affine->ty-q->y/64.0,affine->tx+to->x/64.0,affine->ty-to->y/64.0);
  (void) ConcatenateString(&draw_info->primitive,path);
  return(0);
}
static int TraceLineTo(FT_Vector *to,DrawInfo *draw_info)
{
  /*
    Freetype outline callback: append a straight line segment to the draw
    primitive path (26.6 fixed-point units, y flipped into image space).
  */
  char
    path[MaxTextExtent];

  const AffineMatrix
    *affine;

  affine=(&draw_info->affine);
  (void) FormatMagickString(path,MaxTextExtent,"L%g,%g",affine->tx+to->x/64.0,
    affine->ty-to->y/64.0);
  (void) ConcatenateString(&draw_info->primitive,path);
  return(0);
}
static int TraceMoveTo(FT_Vector *to,DrawInfo *draw_info)
{
  /*
    Freetype outline callback: start a new subpath at `to' in the draw
    primitive path (26.6 fixed-point units, y flipped into image space).
  */
  char
    path[MaxTextExtent];

  const AffineMatrix
    *affine;

  affine=(&draw_info->affine);
  (void) FormatMagickString(path,MaxTextExtent,"M%g,%g",affine->tx+to->x/64.0,
    affine->ty-to->y/64.0);
  (void) ConcatenateString(&draw_info->primitive,path);
  return(0);
}
static int TraceQuadraticBezier(FT_Vector *control,FT_Vector *to,
  DrawInfo *draw_info)
{
  /*
    Freetype outline callback: append a quadratic Bezier segment (control
    point and endpoint, 26.6 fixed-point units) to the draw primitive path,
    flipping y into image coordinates.
  */
  char
    path[MaxTextExtent];

  const AffineMatrix
    *affine;

  affine=(&draw_info->affine);
  (void) FormatMagickString(path,MaxTextExtent,"Q%g,%g %g,%g",
    affine->tx+control->x/64.0,affine->ty-control->y/64.0,affine->tx+to->x/64.0,
    affine->ty-to->y/64.0);
  (void) ConcatenateString(&draw_info->primitive,path);
  return(0);
}
static MagickBooleanType RenderFreetype(Image *image,const DrawInfo *draw_info,
const char *encoding,const PointInfo *offset,TypeMetric *metrics)
{
#if !defined(FT_OPEN_PATHNAME)
#define FT_OPEN_PATHNAME ft_open_pathname
#endif
typedef struct _GlyphInfo
{
FT_UInt
id;
FT_Vector
origin;
FT_Glyph
image;
} GlyphInfo;
const char
*value;
DrawInfo
*annotate_info;
FT_BBox
bounds;
FT_BitmapGlyph
bitmap;
FT_Encoding
encoding_type;
FT_Error
status;
FT_Face
face;
FT_Int32
flags;
FT_Library
library;
FT_Matrix
affine;
FT_Open_Args
args;
FT_Vector
origin;
GlyphInfo
glyph,
last_glyph;
long
y;
PointInfo
point,
resolution;
register long
i;
size_t
length;
static FT_Outline_Funcs
OutlineMethods =
{
(FT_Outline_MoveTo_Func) TraceMoveTo,
(FT_Outline_LineTo_Func) TraceLineTo,
(FT_Outline_ConicTo_Func) TraceQuadraticBezier,
(FT_Outline_CubicTo_Func) TraceCubicBezier,
0, 0
};
unsigned long
*text;
/*
Initialize Truetype library.
*/
status=FT_Init_FreeType(&library);
if (status != 0)
ThrowBinaryException(TypeError,"UnableToInitializeFreetypeLibrary",
image->filename);
args.flags=FT_OPEN_PATHNAME;
if (draw_info->font == (char *) NULL)
args.pathname=(char *) "helvetica";
else
if (*draw_info->font != '@')
args.pathname=draw_info->font;
else
args.pathname=draw_info->font+1;
face=(FT_Face) NULL;
status=FT_Open_Face(library,&args,draw_info->face,&face);
if (status != 0)
{
(void) FT_Done_FreeType(library);
(void) ThrowMagickException(&image->exception,GetMagickModule(),
TypeError,"UnableToReadFont","`%s'",draw_info->font);
return(RenderPostscript(image,draw_info,offset,metrics));
}
if ((draw_info->metrics != (char *) NULL) &&
(IsPathAccessible(draw_info->metrics) != MagickFalse))
(void) FT_Attach_File(face,draw_info->metrics);
encoding_type=ft_encoding_unicode;
status=FT_Select_Charmap(face,encoding_type);
if ((status != 0) && (face->num_charmaps != 0))
status=FT_Set_Charmap(face,face->charmaps[0]);
if (encoding != (const char *) NULL)
{
if (LocaleCompare(encoding,"AdobeCustom") == 0)
encoding_type=ft_encoding_adobe_custom;
if (LocaleCompare(encoding,"AdobeExpert") == 0)
encoding_type=ft_encoding_adobe_expert;
if (LocaleCompare(encoding,"AdobeStandard") == 0)
encoding_type=ft_encoding_adobe_standard;
if (LocaleCompare(encoding,"AppleRoman") == 0)
encoding_type=ft_encoding_apple_roman;
if (LocaleCompare(encoding,"BIG5") == 0)
encoding_type=ft_encoding_big5;
if (LocaleCompare(encoding,"GB2312") == 0)
encoding_type=ft_encoding_gb2312;
if (LocaleCompare(encoding,"Johab") == 0)
encoding_type=ft_encoding_johab;
#if defined(ft_encoding_latin_1)
if (LocaleCompare(encoding,"Latin-1") == 0)
encoding_type=ft_encoding_latin_1;
#endif
if (LocaleCompare(encoding,"Latin-2") == 0)
encoding_type=ft_encoding_latin_2;
if (LocaleCompare(encoding,"None") == 0)
encoding_type=ft_encoding_none;
if (LocaleCompare(encoding,"SJIScode") == 0)
encoding_type=ft_encoding_sjis;
if (LocaleCompare(encoding,"Symbol") == 0)
encoding_type=ft_encoding_symbol;
if (LocaleCompare(encoding,"Unicode") == 0)
encoding_type=ft_encoding_unicode;
if (LocaleCompare(encoding,"Wansung") == 0)
encoding_type=ft_encoding_wansung;
status=FT_Select_Charmap(face,encoding_type);
if (status != 0)
ThrowBinaryException(TypeError,"UnrecognizedFontEncoding",encoding);
}
/*
Set text size.
*/
resolution.x=DefaultResolution;
resolution.y=DefaultResolution;
if (draw_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(draw_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
status=FT_Set_Char_Size(face,(FT_F26Dot6) (64.0*draw_info->pointsize),
(FT_F26Dot6) (64.0*draw_info->pointsize),(FT_UInt) resolution.x,
(FT_UInt) resolution.y);
metrics->pixels_per_em.x=face->size->metrics.x_ppem;
metrics->pixels_per_em.y=face->size->metrics.y_ppem;
metrics->ascent=(double) face->size->metrics.ascender/64.0;
metrics->descent=(double) face->size->metrics.descender/64.0;
metrics->width=0;
metrics->origin.x=0;
metrics->origin.y=0;
metrics->height=(double) face->size->metrics.height/64.0;
metrics->max_advance=0.0;
if (face->size->metrics.max_advance > MagickEpsilon)
metrics->max_advance=(double) face->size->metrics.max_advance/64.0;
metrics->bounds.x1=0.0;
metrics->bounds.y1=metrics->descent;
metrics->bounds.x2=metrics->ascent+metrics->descent;
metrics->bounds.y2=metrics->ascent+metrics->descent;
metrics->underline_position=face->underline_position/64.0;
metrics->underline_thickness=face->underline_thickness/64.0;
if (*draw_info->text == '\0')
{
(void) FT_Done_Face(face);
(void) FT_Done_FreeType(library);
return(MagickTrue);
}
/*
Convert text to 2-byte format as prescribed by the encoding.
*/
switch (encoding_type)
{
case ft_encoding_sjis:
{
text=EncodeSJIS(draw_info->text,&length);
break;
}
case ft_encoding_unicode:
{
text=EncodeUnicode(draw_info->text,&length);
break;
}
default:
{
if (draw_info->encoding != (char *) NULL)
{
if (LocaleCompare(draw_info->encoding,"SJIS") == 0)
{
text=EncodeSJIS(draw_info->text,&length);
break;
}
if ((LocaleCompare(draw_info->encoding,"UTF-8") == 0) ||
(encoding_type != ft_encoding_none))
{
text=EncodeUnicode(draw_info->text,&length);
break;
}
}
if (encoding_type == ft_encoding_unicode)
{
text=EncodeUnicode(draw_info->text,&length);
break;
}
text=EncodeText(draw_info->text,&length);
break;
}
}
if (text == (unsigned long *) NULL)
{
(void) FT_Done_Face(face);
(void) FT_Done_FreeType(library);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Compute bounding box.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(AnnotateEvent,GetMagickModule(),"Font %s; "
"font-encoding %s; text-encoding %s; pointsize %g",
draw_info->font != (char *) NULL ? draw_info->font : "none",
encoding != (char *) NULL ? encoding : "none",
draw_info->encoding != (char *) NULL ? draw_info->encoding : "none",
draw_info->pointsize);
flags=FT_LOAD_NO_BITMAP;
value=GetImageProperty(image,"type:hinting");
if (LocaleCompare(value,"off") == 0)
flags|=FT_LOAD_NO_HINTING;
glyph.id=0;
glyph.image=NULL;
last_glyph.id=0;
last_glyph.image=NULL;
origin.x=0;
origin.y=0;
affine.xx=65536L;
affine.yx=0L;
affine.xy=0L;
affine.yy=65536L;
if (draw_info->render != MagickFalse)
{
affine.xx=(FT_Fixed) (65536L*draw_info->affine.sx+0.5);
affine.yx=(FT_Fixed) (-65536L*draw_info->affine.rx+0.5);
affine.xy=(FT_Fixed) (-65536L*draw_info->affine.ry+0.5);
affine.yy=(FT_Fixed) (65536L*draw_info->affine.sy+0.5);
}
annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&annotate_info->primitive,"path '");
if (draw_info->render != MagickFalse)
{
if (image->storage_class != DirectClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
}
point.x=0.0;
point.y=0.0;
for (i=0; i < (long) length; i++)
{
glyph.id=FT_Get_Char_Index(face,text[i]);
if (glyph.id == 0)
glyph.id=FT_Get_Char_Index(face,'?');
if ((glyph.id != 0) && (last_glyph.id != 0))
{
if (draw_info->kerning != 0.0)
origin.x+=64.0*draw_info->kerning;
else
if (FT_HAS_KERNING(face))
{
FT_Vector
kerning;
status=FT_Get_Kerning(face,last_glyph.id,glyph.id,
ft_kerning_default,&kerning);
if (status == 0)
origin.x+=kerning.x;
}
}
glyph.origin=origin;
status=FT_Load_Glyph(face,glyph.id,flags);
if (status != 0)
continue;
status=FT_Get_Glyph(face->glyph,&glyph.image);
if (status != 0)
continue;
status=FT_Outline_Get_BBox(&((FT_OutlineGlyph) glyph.image)->outline,
&bounds);
if (status != 0)
continue;
if ((i == 0) || (bounds.xMin < metrics->bounds.x1))
metrics->bounds.x1=bounds.xMin;
if ((i == 0) || (bounds.yMin < metrics->bounds.y1))
metrics->bounds.y1=bounds.yMin;
if ((i == 0) || (bounds.xMax > metrics->bounds.x2))
metrics->bounds.x2=bounds.xMax;
if ((i == 0) || (bounds.yMax > metrics->bounds.y2))
metrics->bounds.y2=bounds.yMax;
if (draw_info->render != MagickFalse)
if ((draw_info->stroke.opacity != TransparentOpacity) ||
(draw_info->stroke_pattern != (Image *) NULL))
{
/*
Trace the glyph.
*/
annotate_info->affine.tx=glyph.origin.x/64.0;
annotate_info->affine.ty=glyph.origin.y/64.0;
(void) FT_Outline_Decompose(&((FT_OutlineGlyph) glyph.image)->outline,
&OutlineMethods,annotate_info);
}
FT_Vector_Transform(&glyph.origin,&affine);
(void) FT_Glyph_Transform(glyph.image,&affine,&glyph.origin);
status=FT_Glyph_To_Bitmap(&glyph.image,ft_render_mode_normal,
(FT_Vector *) NULL,MagickTrue);
if (status != 0)
continue;
bitmap=(FT_BitmapGlyph) glyph.image;
point.x=offset->x+bitmap->left;
point.y=offset->y-bitmap->top;
if (draw_info->render != MagickFalse)
{
ExceptionInfo
*exception;
MagickBooleanType
status;
ViewInfo
*image_view;
/*
Rasterize the glyph.
*/
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (long) bitmap->bitmap.rows; y++)
{
long
x_offset,
y_offset;
MagickBooleanType
active,
sync;
MagickRealType
fill_opacity;
PixelPacket
fill_color;
register long
x;
register PixelPacket
*q;
register unsigned char
*p;
if (status == MagickFalse)
continue;
x_offset=(long) (point.x+0.5);
y_offset=(long) (point.y+y+0.5);
if ((y_offset < 0) || (y_offset >= (long) image->rows))
continue;
q=GetCacheViewAuthenticPixels(image_view,x_offset,y_offset,
bitmap->bitmap.width,1,exception);
active=q != (PixelPacket *) NULL ? MagickTrue : MagickFalse;
p=bitmap->bitmap.buffer+y*bitmap->bitmap.width;
for (x=0; x < (long) bitmap->bitmap.width; x++)
{
x_offset++;
if ((*p == 0) || (x_offset < 0) ||
(x_offset >= (long) image->columns))
{
p++;
q++;
continue;
}
fill_opacity=(MagickRealType) (*p)/255.0;
if (draw_info->text_antialias == MagickFalse)
fill_opacity=fill_opacity > 0.5 ? 1.0 : 0.0;
if (active == MagickFalse)
q=GetCacheViewAuthenticPixels(image_view,x_offset,y_offset,1,1,
exception);
if (q == (PixelPacket *) NULL)
{
p++;
q++;
continue;
}
(void) GetFillColor(draw_info,x_offset,y_offset,&fill_color);
fill_opacity=QuantumRange-fill_opacity*(QuantumRange-
fill_color.opacity);
MagickCompositeOver(&fill_color,fill_opacity,q,q->opacity,q);
if (active == MagickFalse)
{
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
if ((bitmap->left+bitmap->bitmap.width) > metrics->width)
metrics->width=bitmap->left+bitmap->bitmap.width;
origin.x+=face->glyph->advance.x;
metrics->origin.x=origin.x;
metrics->origin.y=origin.y;
if (last_glyph.id != 0)
FT_Done_Glyph(last_glyph.image);
last_glyph=glyph;
}
if (last_glyph.id != 0)
FT_Done_Glyph(last_glyph.image);
if ((draw_info->stroke.opacity != TransparentOpacity) ||
(draw_info->stroke_pattern != (Image *) NULL))
{
if (draw_info->render != MagickFalse)
{
/*
Draw text stroke.
*/
annotate_info->affine.tx=offset->x;
annotate_info->affine.ty=offset->y;
(void) ConcatenateString(&annotate_info->primitive,"'");
(void) DrawImage(image,annotate_info);
}
}
/*
Determine font metrics.
*/
glyph.id=FT_Get_Char_Index(face,'_');
glyph.origin=origin;
status=FT_Load_Glyph(face,glyph.id,flags);
if (status == 0)
{
status=FT_Get_Glyph(face->glyph,&glyph.image);
if (status == 0)
{
status=FT_Outline_Get_BBox(&((FT_OutlineGlyph) glyph.image)->
outline,&bounds);
if (status == 0)
{
FT_Vector_Transform(&glyph.origin,&affine);
(void) FT_Glyph_Transform(glyph.image,&affine,&glyph.origin);
status=FT_Glyph_To_Bitmap(&glyph.image,ft_render_mode_normal,
(FT_Vector *) NULL,MagickTrue);
bitmap=(FT_BitmapGlyph) glyph.image;
if (bitmap->left > metrics->width)
metrics->width=bitmap->left;
}
}
if (glyph.id != 0)
FT_Done_Glyph(glyph.image);
}
metrics->width-=metrics->bounds.x1/64.0;
metrics->bounds.x1/=64.0;
metrics->bounds.y1/=64.0;
metrics->bounds.x2/=64.0;
metrics->bounds.y2/=64.0;
metrics->origin.x/=64.0;
metrics->origin.y/=64.0;
/*
Relinquish resources.
*/
text=(unsigned long *) RelinquishMagickMemory(text);
annotate_info=DestroyDrawInfo(annotate_info);
(void) FT_Done_Face(face);
(void) FT_Done_FreeType(library);
return(MagickTrue);
}
#else
static MagickBooleanType RenderFreetype(Image *image,const DrawInfo *draw_info,
  const char *magick_unused(encoding),const PointInfo *offset,
  TypeMetric *metrics)
{
  MagickBooleanType
    status;

  /*
    Freetype support was not compiled in: log a missing-delegate warning,
    then fall back to the Postscript renderer for this request.
  */
  (void) ThrowMagickException(&image->exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (Freetype)",
    draw_info->font);
  status=RenderPostscript(image,draw_info,offset,metrics);
  return(status);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e n d e r P o s t s c r i p t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RenderPostscript() renders text on the image with a Postscript font. It
% also returns the bounding box of the text relative to the image.
%
% The format of the RenderPostscript method is:
%
% MagickBooleanType RenderPostscript(Image *image,DrawInfo *draw_info,
% const PointInfo *offset,TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o offset: (x,y) location of text relative to image.
%
% o metrics: bounding box of text.
%
*/
/* Return the smaller of two size_t values. */
static inline size_t MagickMin(const size_t x,const size_t y)
{
  return(x < y ? x : y);
}
/*
  Return a freshly allocated copy of text with every '(' and ')' preceded by
  a backslash, suitable for embedding in a Postscript string literal.  The
  amount of input consumed is capped so that at most MaxTextExtent-1
  characters (escapes included) are emitted; the bound is re-evaluated each
  iteration because escape_count grows as escapes are inserted.
*/
static char *EscapeParenthesis(const char *text)
{
  char
    *result;

  register char
    *dst;

  register long
    idx;

  size_t
    escape_count;

  escape_count=0;
  result=AcquireString(text);
  dst=result;
  idx=0;
  while (idx < (long) MagickMin(strlen(text),MaxTextExtent-escape_count-1))
  {
    if ((text[idx] == '(') || (text[idx] == ')'))
      {
        *dst++='\\';
        escape_count++;
      }
    *dst++=text[idx];
    idx++;
  }
  *dst='\0';
  return(result);
}
static MagickBooleanType RenderPostscript(Image *image,
  const DrawInfo *draw_info,const PointInfo *offset,TypeMetric *metrics)
{
  char
    filename[MaxTextExtent],
    geometry[MaxTextExtent],
    *text;

  FILE
    *file;

  Image
    *annotate_image;

  ImageInfo
    *annotate_info;

  int
    unique_file;

  long
    y;

  MagickBooleanType
    identity;

  PointInfo
    extent,
    point,
    resolution;

  register long
    i,
    x;

  register PixelPacket
    *q;

  /*
    Render label with a Postscript font.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(AnnotateEvent,GetMagickModule(),
      "Font %s; pointsize %g",draw_info->font != (char *) NULL ?
      draw_info->font : "none",draw_info->pointsize);
  /* Write the Postscript program to a unique temporary file. */
  file=(FILE *) NULL;
  unique_file=AcquireUniqueFileResource(filename);
  if (unique_file != -1)
    file=fdopen(unique_file,"wb");
  if ((unique_file == -1) || (file == (FILE *) NULL))
    {
      ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile",
        filename);
      return(MagickFalse);
    }
  /*
    Emit a small PS program: re-encode the font to ISOLatin1, then show the
    text.
  */
  (void) fprintf(file,"%%!PS-Adobe-3.0\n");
  (void) fprintf(file,"/ReencodeType\n");
  (void) fprintf(file,"{\n");
  (void) fprintf(file," findfont dup length\n");
  (void) fprintf(file,
    " dict begin { 1 index /FID ne {def} {pop pop} ifelse } forall\n");
  (void) fprintf(file,
    " /Encoding ISOLatin1Encoding def currentdict end definefont pop\n");
  (void) fprintf(file,"} bind def\n");
  /*
    Sample to compute bounding box.
  */
  /* identity: affine has no rotation/shear and uniform scale. */
  identity=(draw_info->affine.sx == draw_info->affine.sy) &&
    (draw_info->affine.rx == 0.0) && (draw_info->affine.ry == 0.0) ?
    MagickTrue : MagickFalse;
  extent.x=0.0;
  extent.y=0.0;
  for (i=0; i <= (long) (strlen(draw_info->text)+2); i++)
  {
    point.x=fabs(draw_info->affine.sx*i*draw_info->pointsize+
      draw_info->affine.ry*2.0*draw_info->pointsize);
    point.y=fabs(draw_info->affine.rx*i*draw_info->pointsize+
      draw_info->affine.sy*2.0*draw_info->pointsize);
    if (point.x > extent.x)
      extent.x=point.x;
    if (point.y > extent.y)
      extent.y=point.y;
  }
  (void) fprintf(file,"%g %g moveto\n",identity != MagickFalse ? 0.0 :
    extent.x/2.0,extent.y/2.0);
  (void) fprintf(file,"%g %g scale\n",draw_info->pointsize,
    draw_info->pointsize);
  /* Fall back to Times-Roman when no font is given or the name contains
     '/', which would corrupt the PS /name literal. */
  if ((draw_info->font == (char *) NULL) || (*draw_info->font == '\0') ||
      (strchr(draw_info->font,'/') != (char *) NULL))
    (void) fprintf(file,
      "/Times-Roman-ISO dup /Times-Roman ReencodeType findfont setfont\n");
  else
    (void) fprintf(file,"/%s-ISO dup /%s ReencodeType findfont setfont\n",
      draw_info->font,draw_info->font);
  (void) fprintf(file,"[%g %g %g %g 0 0] concat\n",draw_info->affine.sx,
    -draw_info->affine.rx,-draw_info->affine.ry,draw_info->affine.sy);
  text=EscapeParenthesis(draw_info->text);
  if (identity == MagickFalse)
    (void) fprintf(file,"(%s) stringwidth pop -0.5 mul -0.5 rmoveto\n",text);
  (void) fprintf(file,"(%s) show\n",text);
  text=DestroyString(text);
  (void) fprintf(file,"showpage\n");
  (void) fclose(file);
  /* Rasterize the program by reading it back through the "ps:" delegate. */
  (void) FormatMagickString(geometry,MaxTextExtent,"%ldx%ld+0+0!",(long)
    (extent.x+0.5),(long) (extent.y+0.5));
  annotate_info=AcquireImageInfo();
  (void) FormatMagickString(annotate_info->filename,MaxTextExtent,"ps:%s",
    filename);
  (void) CloneString(&annotate_info->page,geometry);
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&annotate_info->density,draw_info->density);
  annotate_info->antialias=draw_info->text_antialias;
  annotate_image=ReadImage(annotate_info,&image->exception);
  CatchException(&image->exception);
  annotate_info=DestroyImageInfo(annotate_info);
  (void) RelinquishUniqueFileResource(filename);
  if (annotate_image == (Image *) NULL)
    return(MagickFalse);
  resolution.x=DefaultResolution;
  resolution.y=DefaultResolution;
  if (draw_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(draw_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        resolution.y=resolution.x;
    }
  /* Trim the rendered page down to the text itself. */
  if (identity == MagickFalse)
    (void) TransformImage(&annotate_image,"0x0",(char *) NULL);
  else
    {
      RectangleInfo
        crop_info;

      crop_info=GetImageBoundingBox(annotate_image,&annotate_image->exception);
      crop_info.height=(unsigned long) ((resolution.y/DefaultResolution)*
        ExpandAffine(&draw_info->affine)*draw_info->pointsize+0.5);
      crop_info.y=(long) ((resolution.y/DefaultResolution)*extent.y/8.0+0.5);
      (void) FormatMagickString(geometry,MaxTextExtent,"%lux%lu%+ld%+ld",
        crop_info.width,crop_info.height,crop_info.x,crop_info.y);
      (void) TransformImage(&annotate_image,geometry,(char *) NULL);
    }
  /*
    Approximate the type metrics from the rendered image; the constants
    (-5.0 descent divisor, 1.152 height factor) are empirically chosen.
  */
  metrics->pixels_per_em.x=(resolution.y/DefaultResolution)*
    ExpandAffine(&draw_info->affine)*draw_info->pointsize;
  metrics->pixels_per_em.y=metrics->pixels_per_em.x;
  metrics->ascent=metrics->pixels_per_em.x;
  metrics->descent=metrics->pixels_per_em.y/-5.0;
  metrics->width=(double) annotate_image->columns/
    ExpandAffine(&draw_info->affine);
  metrics->height=1.152*metrics->pixels_per_em.x;
  metrics->max_advance=metrics->pixels_per_em.x;
  metrics->bounds.x1=0.0;
  metrics->bounds.y1=metrics->descent;
  metrics->bounds.x2=metrics->ascent+metrics->descent;
  metrics->bounds.y2=metrics->ascent+metrics->descent;
  metrics->underline_position=(-2.0);
  metrics->underline_thickness=1.0;
  if (draw_info->render == MagickFalse)
    {
      annotate_image=DestroyImage(annotate_image);
      return(MagickTrue);
    }
  if (draw_info->fill.opacity != TransparentOpacity)
    {
      ExceptionInfo
        *exception;

      MagickBooleanType
        sync;

      PixelPacket
        fill_color;

      ViewInfo
        *annotate_view;

      /*
        Render fill color.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      if (annotate_image->matte == MagickFalse)
        (void) SetImageAlphaChannel(annotate_image,OpaqueAlphaChannel);
      fill_color=draw_info->fill;
      exception=(&image->exception);
      annotate_view=AcquireCacheView(annotate_image);
      for (y=0; y < (long) annotate_image->rows; y++)
      {
        q=GetCacheViewAuthenticPixels(annotate_view,0,y,annotate_image->columns,
          1,exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (long) annotate_image->columns; x++)
        {
          /* Use rendered intensity as coverage for the fill color. */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          q->opacity=RoundToQuantum(QuantumRange-(((QuantumRange-
            (MagickRealType) PixelIntensityToQuantum(q))*(QuantumRange-
            fill_color.opacity))/QuantumRange));
          q->red=fill_color.red;
          q->green=fill_color.green;
          q->blue=fill_color.blue;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(annotate_view,exception);
        if (sync == MagickFalse)
          break;
      }
      annotate_view=DestroyCacheView(annotate_view);
      (void) CompositeImage(image,OverCompositeOp,annotate_image,
        (long) (offset->x+0.5),(long) (offset->y-(metrics->ascent+
        metrics->descent)+0.5));
    }
  annotate_image=DestroyImage(annotate_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e n d e r X 1 1 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RenderX11() renders text on the image with an X11 font. It also returns the
% bounding box of the text relative to the image.
%
% The format of the RenderX11 method is:
%
% MagickBooleanType RenderX11(Image *image,DrawInfo *draw_info,
% const PointInfo *offset,TypeMetric *metrics)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o offset: (x,y) location of text relative to image.
%
% o metrics: bounding box of text.
%
*/
#if defined(MAGICKCORE_X11_DELEGATE)
/*
  NOTE(review): this function caches the X connection, font, and colormap in
  function-level static variables, so it is not reentrant; presumably callers
  serialize annotation — verify before using from multiple threads.
*/
static MagickBooleanType RenderX11(Image *image,const DrawInfo *draw_info,
  const PointInfo *offset,TypeMetric *metrics)
{
  MagickBooleanType
    status;

  static DrawInfo
    cache_info;

  static Display
    *display = (Display *) NULL;

  static XAnnotateInfo
    annotate_info;

  static XFontStruct
    *font_info;

  static XPixelInfo
    pixel;

  static XResourceInfo
    resource_info;

  static XrmDatabase
    resource_database;

  static XStandardColormap
    *map_info;

  static XVisualInfo
    *visual_info;

  unsigned long
    height,
    width;

  /* One-time X server initialization, kept alive in the statics above. */
  if (display == (Display *) NULL)
    {
      ImageInfo
        *image_info;

      /*
        Open X server connection.
      */
      display=XOpenDisplay(draw_info->server_name);
      if (display == (Display *) NULL)
        {
          ThrowXWindowException(XServerError,"UnableToOpenXServer",
            draw_info->server_name);
          return(MagickFalse);
        }
      /*
        Get user defaults from X resource database.
      */
      (void) XSetErrorHandler(XError);
      /* NOTE(review): image_info is never destroyed on any path below;
         XGetResourceInfo may retain a reference inside resource_info —
         confirm its lifetime before adding a DestroyImageInfo here. */
      image_info=AcquireImageInfo();
      resource_database=XGetResourceDatabase(display,GetClientName());
      XGetResourceInfo(image_info,resource_database,GetClientName(),
        &resource_info);
      resource_info.close_server=MagickFalse;
      resource_info.colormap=PrivateColormap;
      resource_info.font=AcquireString(draw_info->font);
      resource_info.background_color=AcquireString("#ffffffffffff");
      resource_info.foreground_color=AcquireString("#000000000000");
      map_info=XAllocStandardColormap();
      if (map_info == (XStandardColormap *) NULL)
        {
          ThrowXWindowException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize visual info.
      */
      visual_info=XBestVisualInfo(display,map_info,&resource_info);
      if (visual_info == (XVisualInfo *) NULL)
        {
          ThrowXWindowException(XServerError,"UnableToGetVisual",
            image->filename);
          return(MagickFalse);
        }
      map_info->colormap=(Colormap) NULL;
      pixel.pixels=(unsigned long *) NULL;
      /*
        Initialize Standard Colormap info.
      */
      XGetMapInfo(visual_info,XDefaultColormap(display,visual_info->screen),
        map_info);
      XGetPixelPacket(display,visual_info,map_info,&resource_info,
        (Image *) NULL,&pixel);
      pixel.annotate_context=XDefaultGC(display,visual_info->screen);
      /*
        Initialize font info.
      */
      font_info=XBestFont(display,&resource_info,MagickFalse);
      if (font_info == (XFontStruct *) NULL)
        {
          ThrowXWindowException(XServerError,"UnableToLoadFont",
            draw_info->font);
          return(MagickFalse);
        }
      /* NOTE(review): all three were already NULL-checked above, so this
         combined check is effectively dead code; kept as-is. */
      if ((map_info == (XStandardColormap *) NULL) ||
          (visual_info == (XVisualInfo *) NULL) ||
          (font_info == (XFontStruct *) NULL))
        {
          XFreeResources(display,visual_info,map_info,&pixel,font_info,
            &resource_info,(XWindowInfo *) NULL);
          ThrowXWindowException(XServerError,"UnableToLoadFont",
            image->filename);
          return(MagickFalse);
        }
      cache_info=(*draw_info);
    }
  /*
    Initialize annotate info.
  */
  XGetAnnotateInfo(&annotate_info);
  annotate_info.stencil=ForegroundStencil;
  /* Pointer (not string) comparison: cache_info holds a copy of the last
     draw_info, so a different font pointer triggers a font reload.
     NOTE(review): two distinct pointers to identical font names also
     reload — confirm this is acceptable. */
  if (cache_info.font != draw_info->font)
    {
      /*
        Type name has changed.
      */
      (void) XFreeFont(display,font_info);
      (void) CloneString(&resource_info.font,draw_info->font);
      font_info=XBestFont(display,&resource_info,MagickFalse);
      if (font_info == (XFontStruct *) NULL)
        {
          ThrowXWindowException(XServerError,"UnableToLoadFont",
            draw_info->font);
          return(MagickFalse);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(AnnotateEvent,GetMagickModule(),
      "Font %s; pointsize %g",draw_info->font != (char *) NULL ?
      draw_info->font : "none",draw_info->pointsize);
  cache_info=(*draw_info);
  /* Compute the type metrics from the X font's extents. */
  annotate_info.font_info=font_info;
  annotate_info.text=(char *) draw_info->text;
  annotate_info.width=(unsigned int) XTextWidth(font_info,draw_info->text,
    (int) strlen(draw_info->text));
  annotate_info.height=(unsigned int) font_info->ascent+font_info->descent;
  metrics->pixels_per_em.x=(double) font_info->max_bounds.width;
  metrics->pixels_per_em.y=(double) font_info->ascent+font_info->descent;
  metrics->ascent=(double) font_info->ascent+4;
  metrics->descent=(double) (-font_info->descent);
  metrics->width=annotate_info.width/ExpandAffine(&draw_info->affine);
  metrics->height=font_info->ascent+font_info->descent;
  metrics->max_advance=(double) font_info->max_bounds.width;
  metrics->bounds.x1=0.0;
  metrics->bounds.y1=metrics->descent;
  metrics->bounds.x2=metrics->ascent+metrics->descent;
  metrics->bounds.y2=metrics->ascent+metrics->descent;
  metrics->underline_position=(-2.0);
  metrics->underline_thickness=1.0;
  if (draw_info->render == MagickFalse)
    return(MagickTrue);
  if (draw_info->fill.opacity == TransparentOpacity)
    return(MagickTrue);
  /*
    Render fill color.
  */
  width=annotate_info.width;
  height=annotate_info.height;
  if ((draw_info->affine.rx != 0.0) || (draw_info->affine.ry != 0.0))
    {
      /* Pure rotation only (uniform scale, opposite shears). */
      if (((draw_info->affine.sx-draw_info->affine.sy) == 0.0) &&
          ((draw_info->affine.rx+draw_info->affine.ry) == 0.0))
        annotate_info.degrees=(180.0/MagickPI)*
          atan2(draw_info->affine.rx,draw_info->affine.sx);
    }
  (void) FormatMagickString(annotate_info.geometry,MaxTextExtent,
    "%lux%lu+%ld+%ld",width,height,(long) (offset->x+0.5),
    (long) (offset->y-metrics->ascent-metrics->descent+0.5));
  pixel.pen_color.red=ScaleQuantumToShort(draw_info->fill.red);
  pixel.pen_color.green=ScaleQuantumToShort(draw_info->fill.green);
  pixel.pen_color.blue=ScaleQuantumToShort(draw_info->fill.blue);
  status=XAnnotateImage(display,&pixel,&annotate_info,image);
  if (status == 0)
    {
      ThrowXWindowException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}
#else
static MagickBooleanType RenderX11(Image *image,const DrawInfo *draw_info,
  const PointInfo *offset,TypeMetric *metrics)
{
  /*
    X11 support was not compiled in: report the missing delegate and fail.
  */
  (void) draw_info;
  (void) offset;
  (void) metrics;
  (void) ThrowMagickException(&image->exception,GetMagickModule(),
    MissingDelegateError,"DelegateLibrarySupportNotBuiltIn","`%s' (X11)",
    image->filename);
  return(MagickFalse);
}
#endif
|
GB_unop__cos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cos_fp32_fp32)
// op(A') function: GB (_unop_tran__cos_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = cosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = cosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cosf (Ax [p]) for all entries, in parallel.  Auto-generated
// kernel (see the do-not-edit note at the top of this file).
GrB_Info GB (_unop_apply__cos_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = cosf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots that hold no entry
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = cosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cosf (cast (A')): transpose, typecast, and apply the unary operator.
// Auto-generated; the actual work is in the included transpose template,
// which expands the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__cos_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
final_list.c | #include "final_list.h"
#include <stdio.h>
#include <stdlib.h>
/*
 * Find the node whose state array equals check_state[0..n-1].
 * Contract (relied on by add_asynclist):
 *   - empty list:  *flag = 0, returns NULL
 *   - match found: *flag = 1, returns the matching node
 *   - no match:    *flag = 0, returns the LAST node (append point)
 */
asynclistnode* _search_async_state(asynclist* list, int* check_state, int n, int* flag)
{
    asynclistnode* node = list->next;
    if (node == NULL) {
        *flag = 0;
        return NULL;
    }
    for (;;) {
        *flag = 1;
        for (int i = 0; i < n && *flag; ++i) {
            if (node->state[i] != check_state[i]) {
                *flag = 0;
            }
        }
        if (*flag) {
            return node;
        }
        if (node->next == NULL) {
            break;
        }
        node = node->next;
    }
    return node;  /* last node, *flag == 0 */
}
/* Reset a list header to the empty state: no nodes, zero count. */
void init_asynclist(asynclist* list)
{
    list->n_elements = 0;
    list->next = NULL;
}
/* Reset a synclist header to the empty state. */
void init_synclist(synclist* list)
{
    list->next = NULL;
}
void add_asynclist(asynclist* list, int* state, int n)
{
int flag;
asynclistnode* node = _search_async_state(list,state,n,&flag);
if(flag){
node->n_occurances++;
}
else if(!node)
{
asynclistnode* new_node = (asynclistnode*) malloc(sizeof(asynclistnode));
new_node->state = (int*) malloc(n*sizeof(int));
for (int i = 0; i < n; ++i)
{
new_node->state[i] = state[i];
}
new_node->n_occurances = 1;
list->next = new_node;
new_node->next = NULL;
new_node->prev = NULL;
list->n_elements++;
}
else{
asynclistnode* new_node = (asynclistnode*) malloc(sizeof(asynclistnode));
node->next = new_node;
new_node->prev = node;
new_node->next = NULL;
new_node->state = (int*) malloc(n*sizeof(int));
for (int i = 0; i < n; ++i)
{
new_node->state[i] = state[i];
}
new_node->n_occurances = 1;
list->n_elements++;
}
}
/*
 * Free every node (state array then node) and finally the list header
 * itself.  NOTE(review): assumes the header was heap-allocated by the
 * caller (merge_asynclist relies on this to dispose of list2) — verify.
 */
void clear_asynclist(asynclist* list)
{
    asynclistnode* cur = list->next;
    asynclistnode* nxt;
    for (; cur != NULL; cur = nxt) {
        nxt = cur->next;
        free(cur->state);
        free(cur);
    }
    free(list);
}
/* Dump every node as "<count> <state...>", preceded by a header line. */
void print_asynclist(asynclist* list, int n)
{
    printf("N_Occurances State\n");
    for (asynclistnode* node = list->next; node != NULL; node = node->next)
    {
        printf("%d", node->n_occurances);
        for (int i = 0; i < n; ++i)
            printf(" %d", node->state[i]);
        printf("\n");
    }
}
/*
 * Merge list2 into the longer of the two lists (counts of matching states
 * are summed, unmatched states are appended), destroy the source list, and
 * return the merged list.
 *
 * Bug fixed: the original inner "while(1)" dereferenced check->state with
 * check == NULL whenever the target list was empty (possible when the
 * n_elements counters disagree with the actual chains).  The scan below
 * handles an empty target and appends at the head in that case.  malloc
 * results are now checked; on OOM the offending state is skipped.
 */
asynclist* merge_asynclist(asynclist* list1, asynclist* list2, int n)
{
    /* Keep the longer list as the merge target. */
    if (list1->n_elements < list2->n_elements)
    {
        asynclist* temp = list1;
        list1 = list2;
        list2 = temp;
    }
    asynclistnode* node = list2->next;
    while (node != NULL)
    {
        /* Scan list1 for a node with an identical state vector. */
        asynclistnode* check = list1->next;
        asynclistnode* last = NULL;
        int found = 0;
        while (check != NULL)
        {
            found = 1;
            for (int i = 0; i < n && found; ++i)
            {
                if (node->state[i] != check->state[i])
                    found = 0;
            }
            if (found)
                break;
            last = check;
            check = check->next;
        }
        if (found)
        {
            check->n_occurances += node->n_occurances;
        }
        else
        {
            asynclistnode* new_node = (asynclistnode*) malloc(sizeof(asynclistnode));
            if (new_node != NULL)
            {
                new_node->state = (int*) malloc(n * sizeof(int));
                if (new_node->state == NULL)
                {
                    free(new_node);  /* OOM: skip this state */
                }
                else
                {
                    for (int i = 0; i < n; ++i)
                        new_node->state[i] = node->state[i];
                    new_node->n_occurances = node->n_occurances;
                    new_node->next = NULL;
                    new_node->prev = last;
                    if (last != NULL)
                        last->next = new_node;
                    else
                        list1->next = new_node;  /* target was empty */
                    list1->n_elements++;
                }
            }
        }
        node = node->next;
    }
    clear_asynclist(list2);
    return list1;
}
/* Reset a tree handle to the empty state (no root node). */
void init_tree(base* t)
{
    t->root = NULL;
}
/*
 * Lexicographically compare two int arrays of length n.
 * Returns -1 if a > b at the first differing index, 1 if a < b, 0 if equal.
 * (Note the inverted sign convention: larger element yields -1.)
 */
int _compare(int n, int* a, int* b)
{
    int i = 0;
    while (i < n)
    {
        if (a[i] != b[i])
            return (a[i] > b[i]) ? -1 : 1;
        ++i;
    }
    return 0;
}
void add_node(base* t, int n, int* state)
{
tree* node = t->root;
if(!node)
{
tree* newnode = (tree*)malloc(sizeof(tree));
newnode->state = (int*)malloc(n*sizeof(int));
newnode->n_occurances = 1;
#pragma omp parallel for
for (int i = 0; i < n; ++i)
{
newnode->state[i] = state[i];
}
t->root = newnode;
newnode->left = newnode->right = NULL;
return ;
}
while(node)
{
int comp = _compare(n,node->state,state);
if(!comp)
{
node->n_occurances++;
return;
}
else if(comp == 1)
{
if(node->right){
node = node->right;
}
else{
tree* newnode = (tree*)malloc(sizeof(tree));
newnode->state = (int*)malloc(n*sizeof(int));
newnode->n_occurances = 1;
#pragma omp parallel for
for (int i = 0; i < n; ++i)
{
newnode->state[i] = state[i];
}
node->right = newnode;
newnode->left = newnode->right = NULL;
return;
}
}
else{
if(node->left){
node = node->left;
}
else{
tree* newnode = (tree*)malloc(sizeof(tree));
newnode->state = (int*)malloc(n*sizeof(int));
newnode->n_occurances = 1;
#pragma omp parallel for
for (int i = 0; i < n; ++i)
{
newnode->state[i] = state[i];
}
node->left = newnode;
newnode->left = newnode->right = NULL;
return;
}
}
}
}
/*
 * Recursively free a subtree (children first, then the node's state array
 * and the node itself).
 *
 * Improvement: the original spawned an OpenMP parallel region at EVERY
 * node just to launch the two recursive calls — O(tree size) thread-team
 * creations for no measurable gain (and serialized anyway when nested
 * parallelism is disabled).  Plain recursion frees the same nodes.
 */
void _delete_node(tree* t)
{
    if (t == NULL)
        return;
    _delete_node(t->left);
    _delete_node(t->right);
    free(t->state);
    free(t);
}
/*
 * Free the entire tree.  Improvement: also reset t->root so the handle
 * does not retain a dangling pointer after the nodes are freed (a
 * subsequent add_node/print_tree on the same handle is then safe).
 */
void delete_tree(base* t)
{
    _delete_node(t->root);
    t->root = NULL;
}
/*
 * Print a subtree in post-order (left, right, node); each node is one line
 * of count then n state values, tab-separated.
 *
 * Improvement: the original launched an OpenMP parallel region per node
 * and funneled all output through a critical section, which made the line
 * order nondeterministic while gaining nothing (printing is serialized by
 * the critical section anyway).  Plain recursion gives deterministic
 * post-order output with no threading overhead.
 */
void _print_recur(tree* t, int n)
{
    if (t == NULL)
        return;
    _print_recur(t->left, n);
    _print_recur(t->right, n);
    printf("%d\t", t->n_occurances);
    for (int i = 0; i < n; ++i)
        printf("%d\t", t->state[i]);
    printf("\n");
}
/* Print a header row ("N_Occurances" plus one column per gene index),
   then every node of the tree via _print_recur. */
void print_tree(base* t, int n)
{
    printf("N_Occurances\t");
    int col = 0;
    while (col < n)
    {
        printf("gene%d\t", col);
        ++col;
    }
    printf("\n");
    _print_recur(t->root, n);
}
GB_binop__minus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_uint8
// A.*B function (eWiseMult): GB_AemultB__minus_uint8
// A*D function (colscale): GB_AxD__minus_uint8
// D*A function (rowscale): GB_DxB__minus_uint8
// C+=B function (dense accum): GB_Cdense_accumB__minus_uint8
// C+=b function (dense accum): GB_Cdense_accumb__minus_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_uint8
// C=scalar+B GB_bind1st__minus_uint8
// C=scalar+B' GB_bind1st_tran__minus_uint8
// C=A+scalar GB_bind2nd__minus_uint8
// C=A'+scalar GB_bind2nd_tran__minus_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Auto-generated kernel; the
// loop comes from the included template with the operator z = (x - y).
// Note: unlike the siblings below, this kernel has no GB_DISABLE guard.
void GB_Cdense_ewise3_accum__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  Returns GrB_NO_VALUE when
// this operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  The *_slice
// arrays describe the per-task partition of B (presumably produced by
// GB_ek_slice — see that header).
GrB_Info GB_Cdense_accumB__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__minus_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the return below is unreachable (the block above always
    // returns); kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D, using the minus
// operator per entry (template-expanded).
GrB_Info GB_AxD__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D (template-expanded).
GrB_Info GB_DxB__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, with the
// minus operator where both entries are present (template-expanded).
GrB_Info GB_AaddB__minus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying the MINUS operator only on
// the intersection of the patterns of A and B.
// All logic lives in the included emult template, driven by the task list.
GrB_Info GB_AemultB__minus_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = x - bij entrywise, with the scalar x bound as
// the first operand.  Cx and Bx may alias; the computation is elementwise
// in-place safe.
GrB_Info GB_bind1st__minus_uint8
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
const uint8_t x = (*((uint8_t *) x_input)) ;
const uint8_t *Bx = (const uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // z = x - bij (uint8_t arithmetic wraps modulo 256 on store)
    Cx [p] = (x - Bx [p]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = aij - y entrywise, with the scalar y bound as
// the second operand.  Cx and Ax may alias.
GrB_Info GB_bind2nd__minus_uint8
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
const uint8_t *Ax = (const uint8_t *) Ax_input ;
const uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // z = aij - y (uint8_t arithmetic wraps modulo 256 on store)
    Cx [p] = (Ax [p] - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it defines how one entry of
// A' is combined with the bound scalar x and stored into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply z = x - aij in a single pass.
GrB_Info GB_bind1st_tran__minus_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unpack the bound scalar x; the template does the transpose + apply
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it defines how one entry of
// A' is combined with the bound scalar y and stored into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply z = aij - y in a single pass.
GrB_Info GB_bind2nd_tran__minus_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unpack the bound scalar y; the template does the transpose + apply
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// h1: exercises '#pragma omp simd' with safelen/simdlen/aligned/linear
// clauses.  The '// CHECK*' lines are FileCheck directives matched against
// the emitted LLVM IR; they must not be edited, and the loop bodies must
// stay exactly as written or the expected IR (and hence the test) changes.
void h1(float *c, float *a, double b[], int size)
{
// CHECK-LABEL: define{{.*}} void @h1
int t = 0;
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
// CHECK-NEXT: load
// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_7:[0-9]+]]
}
}
// h2: plain '#pragma omp simd' (no safelen), so stores are expected to carry
// llvm.access.group metadata.  CHECK lines are FileCheck directives; do not
// edit the loop body or the directives.
void h2(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define{{.*}} void @h2
int t = 0;
#pragma omp simd linear(t)
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_10:[0-9]+]]
}
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H2_HEADER:![0-9]+]]
}
// h3: nested loops under a single '#pragma omp simd'; checks that both the
// inner and outer branches carry loop metadata.  CHECK lines are FileCheck
// directives; do not edit the loop body or the directives.
void h3(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define{{.*}} void @h3
#pragma omp simd
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
c[j*i] = a[i] * b[j];
}
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_13:[0-9]+]]
}
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H3_HEADER_INNER:![0-9]+]]
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H3_HEADER:![0-9]+]]
}
// Metadata for h1:
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_16:![0-9]+]], [[LOOP_VEC_ENABLE:![0-9]+]]}
// CHECK: [[LOOP_WIDTH_16]] = !{!"llvm.loop.vectorize.width", i32 16}
// CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8:![0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: [[LOOP_WIDTH_8]] = !{!"llvm.loop.vectorize.width", i32 8}
// CHECK: ![[ACCESS_GROUP_7]] = distinct !{}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], ![[PARALLEL_ACCESSES_9:[0-9]+]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_9]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_7]]}
//
// Metadata for h2:
// CHECK: ![[ACCESS_GROUP_10]] = distinct !{}
// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], ![[PARALLEL_ACCESSES_12:[0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_12]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_10]]}
//
// Metadata for h3:
// CHECK: ![[ACCESS_GROUP_13]] = distinct !{}
// CHECK: [[LOOP_H3_HEADER]] = distinct !{[[LOOP_H3_HEADER]], ![[PARALLEL_ACCESSES_15:[0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_15]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_13]]}
//
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cover the implicitly determined rule: In an orphaned task generating construct,
* formal arguments passed by reference are firstprivate.
* This requires OpenMP 4.5 to work.
* Earlier OpenMP does not allow a reference type for a variable within firstprivate().
* */
#include <stdio.h>
#define MYLEN 100
/* Shared result array.  Each call gen_task(i) writes only slot i, so
   concurrent calls over distinct i do not race. */
int a[MYLEN];
/* Store i+1 into a[i]; invoked from the parallel loop in main(). */
void gen_task(int i)
{
a[i]= i+1;
}
/* Fill the global array in parallel (one distinct element per iteration),
   then verify serially that every slot i holds i+1. */
int main()
{
    int idx = 0;

    {
#pragma omp parallel for private(idx)
        for (idx = 0; idx < MYLEN; idx++)
        {
            gen_task(idx);
        }
    }

    /* correctness checking */
    for (idx = 0; idx < MYLEN; idx++)
    {
        if (a[idx] != idx + 1)
        {
            printf("warning: a[%d] = %d, not expected %d\n", idx, a[idx], idx + 1);
        }
    }

    return 0;
}
|
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
/* Create an IJ matrix object for the local row range [ilower, iupper] and
   column range [jlower, jupper].  Validates the ranges, stores the local
   partitioning, and uses two broadcasts to establish the global first
   row/col (from rank 0) and last row/col (from the last rank).
   Collective over 'comm': every rank must call it. */
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm comm,
HYPRE_BigInt ilower,
HYPRE_BigInt iupper,
HYPRE_BigInt jlower,
HYPRE_BigInt jupper,
HYPRE_IJMatrix *matrix )
{
HYPRE_BigInt *info;
HYPRE_Int num_procs;
HYPRE_Int myid;
hypre_IJMatrix *ijmatrix;
HYPRE_BigInt row0, col0, rowN, colN;
/* zero-initialized object; fields default to NULL/0 via CTAlloc */
ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
hypre_IJMatrixComm(ijmatrix) = comm;
hypre_IJMatrixObject(ijmatrix) = NULL;
hypre_IJMatrixTranslator(ijmatrix) = NULL;
hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
hypre_IJMatrixPrintLevel(ijmatrix) = 0;
hypre_IJMatrixOMPFlag(ijmatrix) = 0;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &myid);
/* an empty local range is ilower == iupper+1; anything smaller is invalid */
if (ilower > iupper+1 || ilower < 0)
{
hypre_error_in_arg(2);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (iupper < -1)
{
hypre_error_in_arg(3);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (jlower > jupper+1 || jlower < 0)
{
hypre_error_in_arg(4);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (jupper < -1)
{
hypre_error_in_arg(5);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
info = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
/* partitionings are stored as [first, last+1) half-open ranges */
hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower;
hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper+1;
hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower;
hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper+1;
/* now we need the global number of rows and columns as well
as the global first row and column index */
/* proc 0 has the first row and col */
if (myid == 0)
{
info[0] = ilower;
info[1] = jlower;
}
hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
row0 = info[0];
col0 = info[1];
/* proc (num_procs-1) has the last row and col */
if (myid == (num_procs-1))
{
info[0] = iupper;
info[1] = jupper;
}
hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs-1, comm);
rowN = info[0];
colN = info[1];
hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;
hypre_TFree(info, HYPRE_MEMORY_HOST);
*matrix = (HYPRE_IJMatrix) ijmatrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy an IJ matrix: free the assumed partition (if any), the underlying
   ParCSR object (if the object type is HYPRE_PARCSR), and the wrapper.
   An object type of -1 means "never set"; any other non-ParCSR type is an
   error and the wrapper is not freed (matches prior behavior).
   Fixes: the assumed-partition test was written 'if hypre_IJMatrixAssumedPart(...)'
   without a parenthesized condition (it only compiled because the macro
   expansion happens to be parenthesized); also removed the redundant
   'if (ijmatrix)' that re-tested the pointer already guarded above. */
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Prepare the matrix for setting coefficients.  Only the ParCSR object type
   is supported; a NULL handle or any other type flags argument 1. */
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* unsupported storage type */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR(ijmatrix);

   return hypre_error_flag;
}
/* Variant of HYPRE_IJMatrixInitialize that also selects the memory location
   (host or device) for the matrix data.  ParCSR only. */
HYPRE_Int
HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* unsupported storage type */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR_v2(ijmatrix, memory_location);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set the diagnostic print level of the IJ matrix.
   Fix: the previous code hard-wired the level to 1, silently ignoring the
   caller-supplied 'print_level' argument; it now stores the given value. */
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* This is a helper routine to compute a prefix sum of integer values.
*
* The current implementation is okay for modest numbers of threads.
*--------------------------------------------------------------------------*/
/* Exclusive prefix sum: sums[0] = 0, sums[j] = vals[0] + ... + vals[j-1].
   Small inputs are handled serially; larger ones are split into 'nthreads'
   contiguous intervals that are scanned in parallel and then stitched.

   Fixes relative to the previous version:
   - serial branch used 'sums[j] += ...', accumulating into the caller's
     uninitialized output buffer; it must be a plain assignment;
   - the parallel first pass zeroed 'sums[0]' from every interval (a data
     race that also left each interval's first entry garbage); each interval
     must zero its own first entry 'sums[j]'. */
HYPRE_Int
hypre_PrefixSumInt(HYPRE_Int nvals,
HYPRE_Int *vals,
HYPRE_Int *sums)
{
   HYPRE_Int j, nthreads, bsize;

   nthreads = hypre_NumThreads();
   bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */

   if (nvals < nthreads || bsize == 1)
   {
      /* serial scan */
      sums[0] = 0;
      for (j = 1; j < nvals; j++)
      {
         sums[j] = sums[j-1] + vals[j-1];
      }
   }
   else
   {
      /* Compute preliminary partial sums (in parallel) within each interval */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j+bsize), nvals);

         /* each interval starts its local scan from zero */
         sums[j] = 0;
         for (i = j+1; i < n; i++)
         {
            sums[i] = sums[i-1] + vals[i-1];
         }
      }

      /* Compute final partial sums (in serial) for the first entry of every interval */
      for (j = bsize; j < nvals; j += bsize)
      {
         sums[j] = sums[j-bsize] + sums[j-1] + vals[j-1];
      }

      /* Compute final partial sums (in parallel) for the remaining entries */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = bsize; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j+bsize), nvals);
         for (i = j+1; i < n; i++)
         {
            sums[i] += sums[j];
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set matrix coefficients: for row rows[i], set ncols[i] entries at columns
   cols[...] to values[...].  Thin validating wrapper that delegates to
   HYPRE_IJMatrixSetValues2 with NULL row_indexes (contiguous packing).
   ncols == NULL is allowed and means one entry per row. */
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* empty update: nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument checks, flagging the offending argument position */
   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(4); return hypre_error_flag; }
   if (!cols)     { hypre_error_in_arg(5); return hypre_error_flag; }
   if (!values)   { hypre_error_in_arg(6); return hypre_error_flag; }

   /* only the ParCSR storage format is supported */
   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set matrix coefficients with explicit per-row offsets: row_indexes[i]
   gives the start of row i's data within cols/values.  If ncols is NULL,
   one entry per row is assumed; if row_indexes is NULL, rows are assumed
   packed contiguously and the offsets are derived via a prefix sum.
   Dispatches to the device path when the matrix lives in device memory,
   otherwise to the (optionally OpenMP-threaded) host ParCSR routines. */
HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
/* empty update: nothing to do */
if (nrows == 0)
{
return hypre_error_flag;
}
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nrows < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/*
if (!ncols)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
*/
if (!rows)
{
hypre_error_in_arg(4);
return hypre_error_flag;
}
if (!cols)
{
hypre_error_in_arg(6);
return hypre_error_flag;
}
if (!values)
{
hypre_error_in_arg(7);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "set");
}
else
#endif
{
/* host path: materialize defaults for the optional arguments */
HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
HYPRE_Int *ncols_tmp = ncols;
if (!ncols_tmp)
{
/* NULL ncols means one coefficient per row */
HYPRE_Int i;
ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows; i++)
{
ncols_tmp[i] = 1;
}
}
if (!row_indexes)
{
/* derive packed offsets: row_indexes_tmp[i] = sum of ncols_tmp[0..i-1] */
row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
}
if (hypre_IJMatrixOMPFlag(ijmatrix))
{
hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
else
{
hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
/* free only what this routine allocated */
if (!ncols)
{
hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
}
if (!row_indexes)
{
hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set every stored coefficient of the matrix to the same value.
   ParCSR only. */
HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* unsupported storage type */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetConstantValuesParCSR(ijmatrix, value);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Add to matrix coefficients: for row rows[i], add ncols[i] values at the
   given columns.  Thin validating wrapper that delegates to
   HYPRE_IJMatrixAddToValues2 with NULL row_indexes (contiguous packing).
   ncols == NULL is allowed and means one entry per row. */
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* empty update: nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument checks, flagging the offending argument position */
   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(4); return hypre_error_flag; }
   if (!cols)     { hypre_error_in_arg(5); return hypre_error_flag; }
   if (!values)   { hypre_error_in_arg(6); return hypre_error_flag; }

   /* only the ParCSR storage format is supported */
   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Add to matrix coefficients with explicit per-row offsets (see
   HYPRE_IJMatrixSetValues2 for the argument conventions).  If ncols is NULL,
   one entry per row is assumed; if row_indexes is NULL, packed offsets are
   derived via a prefix sum.  Dispatches to the device path when the matrix
   lives in device memory, otherwise to the host ParCSR routines. */
HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
/* empty update: nothing to do */
if (nrows == 0)
{
return hypre_error_flag;
}
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nrows < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/*
if (!ncols)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
*/
if (!rows)
{
hypre_error_in_arg(4);
return hypre_error_flag;
}
if (!cols)
{
hypre_error_in_arg(6);
return hypre_error_flag;
}
if (!values)
{
hypre_error_in_arg(7);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "add");
}
else
#endif
{
/* host path: materialize defaults for the optional arguments */
HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
HYPRE_Int *ncols_tmp = ncols;
if (!ncols_tmp)
{
/* NULL ncols means one coefficient per row */
HYPRE_Int i;
ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows; i++)
{
ncols_tmp[i] = 1;
}
}
if (!row_indexes)
{
/* derive packed offsets: row_indexes_tmp[i] = sum of ncols_tmp[0..i-1] */
row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
}
if (hypre_IJMatrixOMPFlag(ijmatrix))
{
hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
else
{
hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
/* free only what this routine allocated */
if (!ncols)
{
hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
}
if (!row_indexes)
{
hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finalize the matrix after all Set/AddTo calls: exchanges off-processor
   contributions and builds the underlying ParCSR object.  Dispatches to the
   device assembly when the matrix lives in device memory.  ParCSR only. */
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );
if (exec == HYPRE_EXEC_DEVICE)
{
return( hypre_IJMatrixAssembleParCSRDevice( ijmatrix ) );
}
else
#endif
{
return( hypre_IJMatrixAssembleParCSR( ijmatrix ) );
}
}
else
{
hypre_error_in_arg(1);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* For each of the nrows listed rows, store its number of stored entries in
   ncols[i].  ParCSR only. */
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_BigInt *rows,
HYPRE_Int *ncols )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* empty query: nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument checks, flagging the offending argument position */
   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!ncols)    { hypre_error_in_arg(4); return hypre_error_flag; }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixGetRowCountsParCSR(ijmatrix, nrows, rows, ncols);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Retrieve stored coefficients: for row rows[i], read ncols[i] entries at
   columns cols[...] into values[...].  ParCSR only. */
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
HYPRE_BigInt *rows,
HYPRE_BigInt *cols,
HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* empty query: nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument checks, flagging the offending argument position */
   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (!ncols)    { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(4); return hypre_error_flag; }
   if (!cols)     { hypre_error_in_arg(5); return hypre_error_flag; }
   if (!values)   { hypre_error_in_arg(6); return hypre_error_flag; }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixGetValuesParCSR(ijmatrix, nrows, ncols, rows, cols, values);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Record the desired underlying storage type (e.g. HYPRE_PARCSR).
   Must be called before HYPRE_IJMatrixInitialize. */
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
HYPRE_Int type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ijmatrix) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Return the underlying storage type via *type. */
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
HYPRE_Int *type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Return this process's local row/column ranges, inclusive on both ends.
   The partitionings are stored as half-open [first, last+1) pairs, hence
   the -1 on the upper bounds. */
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
HYPRE_BigInt *ilower,
HYPRE_BigInt *iupper,
HYPRE_BigInt *jlower,
HYPRE_BigInt *jupper )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   const HYPRE_BigInt *row_part = hypre_IJMatrixRowPartitioning(ijmatrix);
   const HYPRE_BigInt *col_part = hypre_IJMatrixColPartitioning(ijmatrix);

   *ilower = row_part[0];
   *iupper = row_part[1] - 1;
   *jlower = col_part[0];
   *jupper = col_part[1] - 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
/* Expose the underlying matrix object (e.g. a ParCSR matrix) stored inside
   the IJ wrapper.  The caller receives a borrowed pointer; no ownership is
   transferred. */
HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix,
void **object )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*object = hypre_IJMatrixObject( ijmatrix );
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Forward per-row size hints to the ParCSR implementation.  Only the
   HYPRE_PARCSR object type is supported; any other type is reported as an
   argument error. */
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix,
const HYPRE_Int *sizes )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
{
return( hypre_IJMatrixSetRowSizesParCSR( ijmatrix , sizes ) );
}
else
{
/* unsupported object type */
hypre_error_in_arg(1);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Forward per-row diagonal-block / off-diagonal-block size hints to the
 * ParCSR implementation.  Only the HYPRE_PARCSR object type is supported;
 * any other type is reported as an argument error. */
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix   matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      /* Consistency fix: return the implementation's result directly,
         matching HYPRE_IJMatrixSetRowSizes and SetMaxOffProcElmts. */
      return( hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix,
                                                    diag_sizes,
                                                    offdiag_sizes ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set the expected maximum number of off-processor elements, forwarded to
   the ParCSR implementation.  Only HYPRE_PARCSR is supported. */
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
HYPRE_Int max_off_proc_elmts)
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
{
return( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix,
max_off_proc_elmts) );
}
else
{
/* unsupported object type */
hypre_error_in_arg(1);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixRead
* create IJMatrix on host memory
*--------------------------------------------------------------------------*/
/* Read an IJ matrix from one file per MPI rank ("<filename>.<rank %05d>").
 * File format: a header line "ilower iupper jlower jupper" followed by one
 * "row col value" triple per line.  The matrix is created on host memory,
 * assembled, and returned through matrix_ptr (caller owns it).
 * Returns hypre_error_flag; reports an argument error if the file cannot
 * be opened and a generic error on malformed input. */
HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix  matrix;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    I, J;
   HYPRE_Int       ncols;
   HYPRE_Complex   value;
   HYPRE_Int       myid, ret;
   char            new_filename[255];
   FILE           *file;

   hypre_MPI_Comm_rank(comm, &myid);
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);
   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         /* FIX: the stream was previously leaked on this error path */
         fclose(file);
         return hypre_error_flag;
      }

      /* Off-processor rows must be accumulated (communicated at assembly);
         locally owned rows can be set directly. */
      if (I < ilower || I > iupper)
      {
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);
   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixPrint
*--------------------------------------------------------------------------*/
/* Print a ParCSR-backed IJ matrix to one file per rank via
   hypre_ParCSRMatrixPrintIJ.  If the matrix lives in non-host memory, it is
   cloned to the host first and the clone destroyed afterwards.
   Only HYPRE_PARCSR is supported. */
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix matrix,
const char *filename )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
void *object;
HYPRE_IJMatrixGetObject(matrix, &object);
HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object;
HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix);
if ( hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST )
{
hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename);
}
else
{
/* device-resident: copy to host, print, discard the copy */
HYPRE_ParCSRMatrix par_csr2 = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename);
hypre_ParCSRMatrixDestroy(par_csr2);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetOMPFlag
*--------------------------------------------------------------------------*/
/* Store the OpenMP flag in the IJ matrix wrapper.
   NOTE(review): the flag presumably enables OpenMP in set/add operations —
   confirm against the implementation that consumes hypre_IJMatrixOMPFlag. */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
HYPRE_Int omp_flag )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixTranspose
*--------------------------------------------------------------------------*/
/* Create a new IJ matrix holding the transpose of matrix_A.  Global sizes
   and row/column partitionings are swapped; the actual transposition is
   delegated to the ParCSR implementation.  The caller owns *matrix_AT.
   NOTE(review): on an unsupported object type an error is flagged but the
   freshly allocated wrapper is still returned — confirm callers expect this. */
HYPRE_Int
HYPRE_IJMatrixTranspose( HYPRE_IJMatrix matrix_A,
HYPRE_IJMatrix *matrix_AT )
{
hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
hypre_IJMatrix *ij_AT;
HYPRE_Int i;
if (!ij_A)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* allocate the wrapper and mirror A's metadata with rows/cols swapped */
ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A);
hypre_IJMatrixObject(ij_AT) = NULL;
hypre_IJMatrixTranslator(ij_AT) = NULL;
hypre_IJMatrixAssumedPart(ij_AT) = NULL;
hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A);
hypre_IJMatrixAssembleFlag(ij_AT) = 1;
hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A);
hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A);
hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A);
hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A);
hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A);
/* swap the two-entry row/col partitionings */
for (i = 0; i < 2; i++)
{
hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i];
hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i];
}
if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
{
hypre_IJMatrixTransposeParCSR(ij_A, ij_AT);
}
else
{
hypre_error_in_arg(1);
}
*matrix_AT = (HYPRE_IJMatrix) ij_AT;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixNorm
*
* TODO: Add other norms
*--------------------------------------------------------------------------*/
/* Compute a norm of the matrix via the ParCSR implementation; the result is
   written to *norm.  Only HYPRE_PARCSR is supported.
   (Which norm is computed is determined by hypre_IJMatrixNormParCSR; see the
   file's TODO about adding other norms.) */
HYPRE_Int
HYPRE_IJMatrixNorm( HYPRE_IJMatrix matrix,
HYPRE_Real *norm )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
{
hypre_IJMatrixNormParCSR(ijmatrix, norm);
}
else
{
/* unsupported object type */
hypre_error_in_arg(1);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixAdd
*--------------------------------------------------------------------------*/
/* Compute C = alpha*A + beta*B for two IJ matrices that share identical row
 * and column partitionings.  A new wrapper is allocated for C (caller owns
 * *matrix_C) and the addition is delegated to the ParCSR implementation.
 * Only HYPRE_PARCSR is supported. */
HYPRE_Int
HYPRE_IJMatrixAdd( HYPRE_Complex    alpha,
                   HYPRE_IJMatrix   matrix_A,
                   HYPRE_Complex    beta,
                   HYPRE_IJMatrix   matrix_B,
                   HYPRE_IJMatrix  *matrix_C )
{
   hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B;
   hypre_IJMatrix *ij_C;
   HYPRE_BigInt   *row_partitioning_A;
   HYPRE_BigInt   *col_partitioning_A;
   HYPRE_BigInt   *row_partitioning_B;
   HYPRE_BigInt   *col_partitioning_B;
   HYPRE_Int       i;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* FIX: ij_B's partitionings were read below without a NULL check, while
      ij_A was checked — a NULL matrix_B would have crashed. */
   if (!ij_B)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   /* Check if A and B have the same row/col partitionings */
   row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A);
   row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B);
   col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A);
   col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B);
   for (i = 0; i < 2; i++)
   {
      if (row_partitioning_A[i] != row_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same row partitioning!");
         return hypre_error_flag;
      }
      if (col_partitioning_A[i] != col_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same col partitioning!");
         return hypre_error_flag;
      }
   }

   /* allocate C's wrapper and mirror A's metadata */
   ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_IJMatrixComm(ij_C)         = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_C)       = NULL;
   hypre_IJMatrixTranslator(ij_C)   = NULL;
   hypre_IJMatrixAssumedPart(ij_C)  = NULL;
   hypre_IJMatrixObjectType(ij_C)   = hypre_IJMatrixObjectType(ij_A);
   hypre_IJMatrixAssembleFlag(ij_C) = 1;
   hypre_IJMatrixPrintLevel(ij_C)   = hypre_IJMatrixPrintLevel(ij_A);

   /* Copy row/col partitioning of A to C */
   for (i = 0; i < 2; i++)
   {
      hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i];
      hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i];
   }

   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   *matrix_C = (HYPRE_IJMatrix) ij_C;

   return hypre_error_flag;
}
|
ex1.c | /******************************************************************************
* FILE: omp_workshare1.c
* DESCRIPTION:
* OpenMP Example - Loop Work-sharing - C/C++ Version
* In this example, the iterations of a loop are scheduled dynamically
* across the team of threads. A thread will perform CHUNK iterations
* at a time before being scheduled for the next CHUNK of work.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100
/* Demonstrates OpenMP loop work-sharing: the team adds a[] + b[] into c[]
 * in CHUNKSIZE-sized blocks, then the wall-clock time spanning the parallel
 * region is printed.
 * FIX: added the missing return statement for the int-returning main. */
int main (int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    double start = omp_get_wtime();
    double elapsed;

    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n",tid);

        /* NOTE(review): the file header describes dynamic scheduling, but
         * this uses schedule(static,chunk) — confirm which is intended. */
        #pragma omp for schedule(static,chunk)
        for (i = 0; i < N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
        }
    } /* end of parallel section */

    double end = omp_get_wtime();
    elapsed = end - start;
    printf("The elapsed time is: %f\n", elapsed);
    return 0;
}
|
matmul_double_avx2.c | /*
* Square matrix multiplication
* A[N][N] * B[N][N] = C[N][N]
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N 512
//#define N 16
// read timer in second
/* Return the current wall-clock time in seconds (millisecond resolution). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return ((double) now.time) + ((double) now.millitm) / 1000.0;
}
/* Fill an NxN matrix with pseudo-random values in roughly [0, 10). */
void init(double **A) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            A[row][col] = (double) rand() / (double) (RAND_MAX / 10.0);
        }
    }
}
/* SIMD matrix product with a transposed second operand:
 * C[i][j] = sum_k A[i][k] * B[j][k], i.e. C = A * B^T.
 * main() passes BT (the explicit transpose of B), so the net result is A*B
 * with unit-stride access on both operands. */
void matmul_simd(double **A, double **B, double **C) {
int i,j,k;
double temp;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
temp = 0;
/* vectorized dot product; simdlen(8) requests 8-wide lanes */
#pragma omp simd reduction(+:temp) simdlen(8)
for (k = 0; k < N; k++) {
temp += A[i][k] * B[j][k];
}
C[i][j] = temp;
}
}
}
// Debug functions
/* Debug helper: print the top-left 8x8 corner of a matrix, one bracketed
 * row per line, followed by a blank line. */
void print_matrix(double **matrix) {
    for (int row = 0; row < 8; row++) {
        printf("[");
        for (int col = 0; col < 8; col++) {
            printf("%.2f ", matrix[row][col]);
        }
        puts("]");
    }
    puts("");
}
/* Scalar reference product with a transposed second operand:
 * C[i][j] = sum_k A[i][k] * B[j][k] (C = A * B^T). */
void matmul_serial(double **A, double **B, double **C) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            double acc = 0;
            for (int k = 0; k < N; k++) {
                acc += A[i][k] * B[j][k];
            }
            C[i][j] = acc;
        }
    }
}
/* Sum of absolute element-wise differences between two NxN matrices;
 * 0.0 means the matrices agree exactly.
 * FIX: the original summed signed differences, so positive and negative
 * errors could cancel and a badly mismatched pair could still report ~0. */
double check(double **A, double **B){
    double difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabs(A[i][j] - B[i][j]);
        }
    }
    return difference;
}
// Main
/* Driver: allocates the matrices, times num_runs multiplications with the
 * SIMD and serial kernels, prints the 8x8 corners and GFLOPS numbers, and
 * reports the agreement between the two results.
 * FIX: every heap allocation is now freed before exit (the original leaked
 * all of them). */
int main(int argc, char *argv[]) {
    /* row-pointer arrays plus one heap row per matrix */
    double **A = malloc(sizeof(double*)*N);
    double **B = malloc(sizeof(double*)*N);
    double **C_simd = malloc(sizeof(double*)*N);
    double **C_serial = malloc(sizeof(double*)*N);
    double **BT = malloc(sizeof(double*)*N);
    for (int i = 0; i<N; i++) {
        A[i] = malloc(sizeof(double)*N);
        B[i] = malloc(sizeof(double)*N);
        C_simd[i] = malloc(sizeof(double)*N);
        C_serial[i] = malloc(sizeof(double)*N);
        BT[i] = malloc(sizeof(double)*N);
    }
    srand(time(NULL));
    init(A);
    init(B);
    /* BT = B^T so both kernels get unit-stride dot products */
    for(int line = 0; line<N; line++){
        for(int col = 0; col<N; col++){
            BT[line][col] = B[col][line];
        }
    }
    int i;
    int num_runs = 10;
    double elapsed = read_timer();
    for (i=0; i<num_runs; i++)
        matmul_simd(A, BT, C_simd);
    elapsed = (read_timer() - elapsed);
    double elapsed_serial = read_timer();
    for (i=0; i<num_runs; i++)
        matmul_serial(A, BT, C_serial);
    elapsed_serial = (read_timer() - elapsed_serial);
    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);
    /* 2*N^3 floating-point operations per multiplication */
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd,C_serial));
    /* release all matrices */
    for (int r = 0; r < N; r++) {
        free(A[r]);
        free(B[r]);
        free(C_simd[r]);
        free(C_serial[r]);
        free(BT[r]);
    }
    free(A);
    free(B);
    free(C_simd);
    free(C_serial);
    free(BT);
    return 0;
}
|
covariance.c | /**
* covariance.c: This file was adapted from PolyBench/GPU 1.0 test
* suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
/* Problem size */
#define M SIZE
#define N SIZE
#define sqrt_of_array_cell(x, j) sqrt(x[j])
#define FLOAT_N 3214212.01
#define EPS 0.005
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill the (M+1)x(N+1) data matrix with the deterministic PolyBench pattern
 * data[i][j] = i*j/M.  Indexing is 1-based; row 0 and column 0 are unused. */
void init_arrays(DATA_TYPE *data) {
int i, j;
for (i = 1; i < (M + 1); i++) {
for (j = 1; j < (N + 1); j++) {
data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / M;
}
}
}
/* Count the entries of the two (1-based) covariance matrices whose percent
 * difference exceeds PERCENT_DIFF_ERROR_THRESHOLD; prints and returns the
 * count (0 means the CPU and GPU results match). */
int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) {
int i, j, fail;
fail = 0;
for (i = 1; i < (M + 1); i++) {
for (j = 1; j < (N + 1); j++) {
if (percentDiff(symmat[i * (N + 1) + j],
symmat_outputFromGpu[i * (N + 1) + j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
return fail;
}
/* Sequential reference: compute the covariance matrix of 'data' in three
 * phases — column means, centering (in place, 'data' is modified), and the
 * symmetric M x M covariance accumulation.
 * NOTE(review): the mean divides by the constant FLOAT_N rather than the
 * actual sample count N — inherited from the PolyBench kernel; confirm. */
void covariance(DATA_TYPE *data, DATA_TYPE *symmat, DATA_TYPE *mean) {
int i, j, j1, j2;
/* Determine mean of column vectors of input data matrix */
for (j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
for (i = 1; i < (N + 1); i++) {
for (j = 1; j < (M + 1); j++) {
data[i * (M + 1) + j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 1; j1 < (M + 1); j1++) {
for (j2 = j1; j2 < (M + 1); j2++) {
symmat[j1 * (M + 1) + j2] = 0.0;
for (i = 1; i < N + 1; i++) {
symmat[j1 * (M + 1) + j2] +=
data[i * (M + 1) + j1] * data[i * (M + 1) + j2];
}
/* mirror into the lower triangle */
symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
}
}
}
/* Offloaded variant of covariance(): same three phases inside one OpenMP
 * 'target' region.  Unlike the sequential version, the input 'data' is left
 * untouched; the centered values are written to the scratch matrix 'data2'.
 * The final covariance is returned in 'symmat'. */
void covariance_OMP(DATA_TYPE *data, DATA_TYPE *data2, DATA_TYPE *symmat,
DATA_TYPE *mean) {
/* Determine mean of column vectors of input data matrix */
#pragma omp target map(to : data[ : (M + 1) * (N + 1)]) map( \
tofrom : mean[ : (M + 1)], \
data2[ : (M + 1) * (N + 1)]) map(from : symmat[ : ( \
M + 1) * (N + 1)]) device(DEVICE_ID)
{
/* phase 1: per-column means (note: divisor is the FLOAT_N constant) */
#pragma omp parallel for
for (int j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (int i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
#pragma omp parallel for // collapse(2)
for (int i = 1; i < (N + 1); i++) {
for (int j = 1; j < (M + 1); j++) {
data2[i * (M + 1) + j] = data[i * (M + 1) + j] - mean[j];
}
}
/* Calculate the m * m covariance matrix. */
#pragma omp parallel for // collapse(2) schedule(dynamic,8)
for (int j1 = 1; j1 < (M + 1); j1++) {
for (int j2 = j1; j2 < (M + 1); j2++) {
symmat[j1 * (M + 1) + j2] = 0.0;
for (int i = 1; i < N + 1; i++) {
symmat[j1 * (M + 1) + j2] +=
data2[i * (M + 1) + j1] * data2[i * (M + 1) + j2];
}
/* mirror into the lower triangle */
symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
}
}
}
}
/* Driver: initializes the data matrix, runs the offloaded covariance and,
 * when built with RUN_TEST, the sequential reference plus a comparison.
 * Returns the number of mismatched entries (0 on success).
 * FIX: removed the never-used data_GPU pointer and freed data2_GPU and
 * mean_GPU, which were previously leaked. */
int main() {
  double t_start, t_end;
  int fail = 0;
  DATA_TYPE *data;
  DATA_TYPE *data2_GPU;
  DATA_TYPE *symmat;
  DATA_TYPE *mean;
  DATA_TYPE *mean_GPU;
  DATA_TYPE *symmat_outputFromGpu;
  data = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE));
  data2_GPU = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE));
  symmat = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE));
  mean = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE));
  symmat_outputFromGpu =
      (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE));
  mean_GPU = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE));
  fprintf(stdout, "<< Covariance Computation >>\n");
  init_arrays(data);
  t_start = rtclock();
  covariance_OMP(data, data2_GPU, symmat_outputFromGpu, mean_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
  t_start = rtclock();
  covariance(data, symmat, mean);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(symmat, symmat_outputFromGpu);
#endif
  free(data);
  free(data2_GPU);
  free(symmat);
  free(mean);
  free(mean_GPU);
  free(symmat_outputFromGpu);
  return fail;
}
|
GB_unop__cosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cosh_fp64_fp64
// op(A') function: GB_unop_tran__cosh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cosh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = cosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cosh (Ax [p]) for all anz entries, split statically across
// nthreads OpenMP threads.  Cx and Ax may be aliased (same element index
// is read then written, so aliasing is safe).
GrB_Info GB_unop_apply__cosh_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // no typecast is needed: both A and C are double
        Cx [p] = cosh (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cosh (A') : the transpose/typecast/apply loop is generated by textually
// including the shared GB_unop_transpose.c template (phase 2), which expands
// the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__cosh_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi-avx.c | #include <immintrin.h>
/* Vectorized 4-point Jacobi update: for the 4 consecutive grid points
 * starting at v1, store 0.25 * (east + west + north + south) into v2[0..3],
 * where m is the row stride.  Uses unaligned AVX loads/stores, so no
 * alignment requirement on v1/v2.
 * FIX: 'static' added — a plain C99 'inline' definition emits no external
 * symbol, so a call that the compiler does not inline (e.g. at -O0) fails
 * to link. */
static inline
void kernel(double* v1, double * v2, int m)
{
    __m256d alpha = _mm256_set1_pd(0.25);
    /* east / west / north / south neighbors of the 4 points */
    __m256d phi_e = _mm256_loadu_pd (v1 + 1 );
    __m256d phi_w = _mm256_loadu_pd (v1 - 1 );
    __m256d phi_n = _mm256_loadu_pd (v1 + m);
    __m256d phi_s = _mm256_loadu_pd (v1 - m);
    /* accumulate the four neighbors, then scale by 0.25 */
    phi_e = _mm256_add_pd(phi_e, phi_s);
    phi_e = _mm256_add_pd(phi_e, phi_n);
    phi_e = _mm256_add_pd(phi_e, phi_w);
    phi_e = _mm256_mul_pd(alpha, phi_e);
    _mm256_storeu_pd(v2, phi_e);
}
inline
void kernel_sequential(double* v1, double * v2, int m)
{
double phi_e = *(v1 + 1);
double phi_w = *(v1 - 1);
double phi_n = *(v1 + m);
double phi_s = *(v1 - m);
double phi = 0.25*(phi_e + phi_w + phi_n + phi_s);
*(v2) = phi;
}
/* One Jacobi sweep over the interior points: v2 = 4-point stencil of v1.
 * Rows are distributed across OpenMP threads; each row is processed four
 * points at a time by the AVX kernel with a scalar tail loop.
 * NOTE(review): indexing uses j*dim_n + i while i runs over dim_m columns,
 * i.e. dim_n is treated as the leading dimension — correct for square grids;
 * confirm against callers for rectangular ones. */
void laplacian(double* v1, double* v2, int dim_m, int dim_n)
{
    #pragma omp parallel for schedule(static)
    for (int j = 1; j < dim_n - 1; ++j )
    {
        /* FIX: the original wrote (void *) v2 + j*dim_n + 256, which —
         * via the GNU void* extension — offsets in BYTES, not doubles.
         * Prefetch the element address ahead of the row, for write (rw=1). */
        __builtin_prefetch (v2 + j*dim_n + 256, 1, 1);
        int i;
        for (i = 1; i < dim_m - 1 - (dim_m - 1)%4; i = i + 4)
        {
            kernel(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n);
        }
        /* scalar tail for the remaining (dim_m - 1) % 4 interior points */
        for (; i < dim_m - 1; ++i)
        {
            kernel_sequential(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n);
        }
    }
}
|
GB_unop__lgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lgamma_fp64_fp64
// op(A') function: GB_unop_tran__lgamma_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = lgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = lgamma (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lgamma (Ax [p]) over all anz entries; Cx and Ax may be aliased.
// When A is bitmap (Ab != NULL), entries with a clear Ab [p] bit are skipped.
GrB_Info GB_unop_apply__lgamma_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lgamma (A') : the transpose/typecast/apply loop is generated by
// textually including the shared GB_unop_transpose.c template, which expands
// the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__lgamma_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_int32
// op(A') function: GB_tran__abs_fp64_int32
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabs ((double) Ax [p]) for all anz entries (int32 input cast to
// double output), split statically across nthreads OpenMP threads.
GrB_Info GB_unop__abs_fp64_int32
(
double *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: double z = (double) Ax [p] ; Cx [p] = fabs (z) ;
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = fabs ((double) A') : transpose, typecast int32->double, and apply; the
// loop body comes from the shared GB_unaryop_transpose.c template (phase 2),
// which expands the GB_* macros defined at the top of this file.
GrB_Info GB_tran__abs_fp64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ULTRABuilder.h | /**********************************************************************************
Copyright (c) 2020 Tobias Zündorf
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**********************************************************************************/
#pragma once
#include <algorithm>
#include "../../../DataStructures/TripBased/Data.h"
#include "../../../DataStructures/RAPTOR/Data.h"
#include "../../../Helpers/MultiThreading.h"
#include "../../../Helpers/Timer.h"
#include "../../../Helpers/Console/Progress.h"
#include "ShortcutSearch.h"
namespace TripBased {
template<bool DEBUG = false>
class ULTRABuilder {

public:
    inline static constexpr bool Debug = DEBUG;
    using Type = ULTRABuilder<Debug>;

public:
    // Wraps the trip-based data set and prepares an initially edge-less
    // stop-event graph with one vertex per stop event.
    ULTRABuilder(const Data& data) :
        data(data) {
        stopEventGraph.addVertices(data.numberOfStopEvents());
    }

    // Runs one ShortcutSearch per stop (in parallel, threads pinned as
    // requested), merges the per-thread shortcut lists, sorts and
    // deduplicates them by (origin, destination), and materializes each
    // unique shortcut as a stop-event-graph edge carrying its walking
    // distance in the TravelTime attribute.
    void computeShortcuts(const ThreadPinning& threadPinning, const int witnessTransferLimit = 15 * 60, const int minDepartureTime = -never, const int maxDepartureTime = never, const bool verbose = true) noexcept {
        if (verbose) std::cout << "Computing shortcuts with " << threadPinning.numberOfThreads << " threads." << std::endl;
        std::vector<Shortcut> shortcuts;
        Progress progress(data.numberOfStops(), verbose);
        omp_set_num_threads(threadPinning.numberOfThreads);
        #pragma omp parallel
        {
            threadPinning.pinThread();
            ShortcutSearch<Debug> shortcutSearch(data, witnessTransferLimit);
            #pragma omp for schedule(dynamic)
            for (size_t i = 0; i < data.numberOfStops(); i++) {
                shortcutSearch.run(StopId(i), minDepartureTime, maxDepartureTime);
                progress++;
            }
            // Merge the thread-local shortcut lists one thread at a time.
            #pragma omp critical
            {
                const std::vector<Shortcut>& localShortcuts = shortcutSearch.getShortcuts();
                for (const Shortcut& shortcut : localShortcuts) {
                    shortcuts.emplace_back(shortcut);
                }
            }
        }
        std::sort(shortcuts.begin(), shortcuts.end(), [](const Shortcut& a, const Shortcut& b){
            return (a.origin < b.origin) || ((a.origin == b.origin) && (a.destination < b.destination));
        });
        // FIX: the original accessed shortcuts[0] unconditionally, which is
        // undefined behavior when no shortcuts were found.
        if (!shortcuts.empty()) {
            stopEventGraph.addEdge(Vertex(shortcuts[0].origin), Vertex(shortcuts[0].destination)).set(TravelTime, shortcuts[0].walkingDistance);
            for (size_t i = 1; i < shortcuts.size(); i++) {
                // The list is sorted, so duplicates are adjacent; keep the first.
                if ((shortcuts[i].origin == shortcuts[i - 1].origin) && (shortcuts[i].destination == shortcuts[i - 1].destination)) continue;
                stopEventGraph.addEdge(Vertex(shortcuts[i].origin), Vertex(shortcuts[i].destination)).set(TravelTime, shortcuts[i].walkingDistance);
            }
        }
        stopEventGraph.sortEdges(ToVertex);
        progress.finished();
    }

    // Read-only access to the computed shortcut graph.
    inline const DynamicTransferGraph& getStopEventGraph() const noexcept {
        return stopEventGraph;
    }

    // Mutable access to the computed shortcut graph.
    inline DynamicTransferGraph& getStopEventGraph() noexcept {
        return stopEventGraph;
    }

private:
    const Data& data;                      // not owned; must outlive the builder
    DynamicTransferGraph stopEventGraph;   // one vertex per stop event, one edge per shortcut
};
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives and clauses --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP clause.
///
class OMPClause {
  /// \brief Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the clause.
  SourceLocation EndLoc;
  /// \brief Kind of the clause.
  OpenMPClauseKind Kind;
protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
    : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }
  /// \brief Returns true for clauses with no valid source location, i.e.
  /// clauses generated implicitly rather than written by the user.
  bool isImplicit() const { return StartLoc.isInvalid();}
  /// \brief Returns the statements (if any) contained in this clause;
  /// defined out of line by each concrete clause kind.
  StmtRange children();
  ConstStmtRange children() const {
    return const_cast<OMPClause *>(this)->children();
  }
  /// \brief Root of the clause hierarchy: every OMPClause matches.
  static bool classof(const OMPClause *T) {
    return true;
  }
};
/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
///
/// The variable expressions are NOT stored inside this object: they live in
/// memory tail-allocated directly after the derived clause object T, which is
/// why the accessors compute 'static_cast<T *>(this) + 1'.
template <class T>
class OMPVarList {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of variables in the list.
  unsigned NumVars;
protected:
  /// \brief Fetches list of variables associated with this clause.
  llvm::MutableArrayRef<Expr *> getVarRefs() {
    return llvm::MutableArrayRef<Expr *>(
        reinterpret_cast<Expr **>(static_cast<T *>(this) + 1),
        NumVars);
  }
  /// \brief Sets the list of variables for this clause.
  /// \pre VL.size() equals the NumVars the trailing buffer was sized for.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              reinterpret_cast<Expr **>(static_cast<T *>(this) + 1));
  }
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param N Number of the variables in the clause.
  ///
  OMPVarList(SourceLocation LParenLoc, unsigned N)
    : LParenLoc(LParenLoc), NumVars(N) { }
public:
  typedef llvm::MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }
  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return ArrayRef<const Expr *>(
        reinterpret_cast<const Expr *const *>(static_cast<const T *>(this) + 1),
        NumVars);
  }
};
/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
///
class OMPDefaultClause : public OMPClause {
  // OMPClauseReader uses the private setters below, presumably when
  // deserializing the clause from a serialized AST -- confirm against reader.
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// \brief Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  ///
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
  /// \brief Set argument location.
  ///
  /// \param KLoc Argument location.
  ///
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// \brief Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
    : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
      Kind(A), KindKwLoc(ALoc) { }
  /// \brief Build an empty clause.
  ///
  OMPDefaultClause()
    : OMPClause(OMPC_default, SourceLocation(), SourceLocation()),
      LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown),
      KindKwLoc(SourceLocation()) { }
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
  /// \brief Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
  /// \brief 'default' holds no sub-statements, so the child range is empty.
  StmtRange children() {
    return StmtRange();
  }
};
/// \brief This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
///
/// The variable references are tail-allocated after this object (see
/// OMPVarList), so instances must be created via Create/CreateEmpty.
class OMPPrivateClause : public OMPClause, public OMPVarList<OMPPrivateClause> {
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
    : OMPClause(OMPC_private, StartLoc, EndLoc),
      OMPVarList<OMPPrivateClause>(LParenLoc, N) { }
  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPPrivateClause(unsigned N)
    : OMPClause(OMPC_private, SourceLocation(), SourceLocation()),
      OMPVarList<OMPPrivateClause>(SourceLocation(), N) { }
public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<Expr *> VL);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// \brief Children are exactly the variable reference expressions.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};
/// \brief This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
///
/// The variable references are tail-allocated after this object (see
/// OMPVarList), so instances must be created via Create/CreateEmpty.
class OMPFirstprivateClause : public OMPClause,
                              public OMPVarList<OMPFirstprivateClause> {
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
    : OMPClause(OMPC_firstprivate, StartLoc, EndLoc),
      OMPVarList<OMPFirstprivateClause>(LParenLoc, N) { }
  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPFirstprivateClause(unsigned N)
    : OMPClause(OMPC_firstprivate, SourceLocation(), SourceLocation()),
      OMPVarList<OMPFirstprivateClause>(SourceLocation(), N) { }
public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPFirstprivateClause *Create(const ASTContext &C,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc,
                                       ArrayRef<Expr *> VL);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// \brief Children are exactly the variable reference expressions.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};
/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
///
/// The variable references are tail-allocated after this object (see
/// OMPVarList), so instances must be created via Create/CreateEmpty.
class OMPSharedClause : public OMPClause, public OMPVarList<OMPSharedClause> {
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
    : OMPClause(OMPC_shared, StartLoc, EndLoc),
      OMPVarList<OMPSharedClause>(LParenLoc, N) { }
  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPSharedClause(unsigned N)
    : OMPClause(OMPC_shared, SourceLocation(), SourceLocation()),
      OMPVarList<OMPSharedClause>(SourceLocation(), N) { }
public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);
  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// \brief Children are exactly the variable reference expressions.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
/// Memory layout: the clause pointers are tail-allocated directly after the
/// derived directive object, immediately followed by the associated statement
/// and any extra expressions.  Slot 0 of StmtAndExpressions is always the
/// associated statement (see set/getAssociatedStmt).
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Pointer to the list of clauses.
  llvm::MutableArrayRef<OMPClause *> Clauses;
  /// \brief Associated statement (if any) and expressions.
  llvm::MutableArrayRef<Stmt *> StmtAndExpressions;
protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  /// The unnamed T* parameter only fixes the template type so that the
  /// trailing-storage arithmetic uses the most-derived object size.
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumberOfExpressions)
    : Stmt(SC), Kind(K), StartLoc(StartLoc), EndLoc(EndLoc),
      Clauses(reinterpret_cast<OMPClause **>(static_cast<T *>(this) + 1),
              NumClauses),
      StmtAndExpressions(reinterpret_cast<Stmt **>(Clauses.end()),
                         NumberOfExpressions) { }
  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);
  /// \brief Set the associated statement for the directive.
  ///
  /// /param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    StmtAndExpressions[0] = S;
  }
public:
  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return Clauses.size(); }
  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const {
    assert(i < Clauses.size() && "index out of bound!");
    return Clauses[i];
  }
  /// \brief Returns statement associated with the directive.
  Stmt *getAssociatedStmt() const {
    return StmtAndExpressions[0];
  }
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }
  /// \brief Children are the associated statement plus helper expressions;
  /// clause contents are not part of this range.
  child_range children() {
    return child_range(StmtAndExpressions.begin(), StmtAndExpressions.end());
  }
  ArrayRef<OMPClause *> clauses() { return Clauses; }
  ArrayRef<OMPClause *> clauses() const { return Clauses; }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  /// The trailing '1' reserves the single expression slot used for the
  /// associated statement (slot 0 in the base class).
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned N)
    : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                             StartLoc, EndLoc, N, 1) { }
  /// \brief Build an empty directive.
  ///
  /// \param N Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned N)
    : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                             SourceLocation(), SourceLocation(), N, 1) { }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPParallelDirective *Create(const ASTContext &C,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      ArrayRef<OMPClause *> Clauses,
                                      Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                           EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
} // end namespace clang
#endif
|
c-omp.c | /* This file contains routines to construct OpenACC and OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005-2020 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "stringpool.h"
#include "omp-general.h"
#include "gomp-constants.h"
#include "memmodel.h"
#include "attribs.h"
#include "gimplify.h"
/* Complete a #pragma oacc wait construct by building the corresponding
   GOACC_WAIT builtin call.  PARMS is the chain of wait arguments, CLAUSES
   the clause list (searched for 'async'), and LOC the location of the
   #pragma.  Returns the call expression.  */
tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  vec<tree, va_gc> *args;
  vec_alloc (args, nparms + 2);

  /* First argument: the async queue, or GOMP_ASYNC_SYNC when no 'async'
     clause is present.  */
  tree async = (omp_find_clause (clauses, OMP_CLAUSE_ASYNC)
		? OMP_CLAUSE_ASYNC_EXPR (clauses)
		: build_int_cst (integer_type_node, GOMP_ASYNC_SYNC));
  args->quick_push (async);
  /* Second argument: the number of wait arguments that follow.  */
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (tree p = parms; p; p = TREE_CHAIN (p))
    {
      tree expr = OMP_CLAUSE_WAIT_EXPR (p);
      /* Constants are normalized to integer_type_node.  */
      if (TREE_CODE (expr) == INTEGER_CST)
	expr = build_int_cst (integer_type_node, TREE_INT_CST_LOW (expr));
      args->quick_push (expr);
    }

  tree fn = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
  tree call = build_call_expr_loc_vec (loc, fn, args);
  vec_free (args);
  return call;
}
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.
   Appends the OMP_MASTER node to the current statement list and
   returns it.  */
tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree master = build1 (OMP_MASTER, void_type_node, stmt);
  master = add_stmt (master);
  SET_EXPR_LOCATION (master, loc);
  return master;
}
/* Complete a #pragma omp taskgroup construct.  BODY is the structured-block
   that follows the pragma, CLAUSES its clause list.  LOC is the location of
   the #pragma.  Appends the OMP_TASKGROUP node to the current statement list
   and returns it.  */
tree
c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
{
  tree taskgroup = make_node (OMP_TASKGROUP);
  TREE_TYPE (taskgroup) = void_type_node;
  SET_EXPR_LOCATION (taskgroup, loc);
  OMP_TASKGROUP_BODY (taskgroup) = body;
  OMP_TASKGROUP_CLAUSES (taskgroup) = clauses;
  return add_stmt (taskgroup);
}
/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  Appends the
   OMP_CRITICAL node to the current statement list and returns it.  */
tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree critical = make_node (OMP_CRITICAL);
  TREE_TYPE (critical) = void_type_node;
  SET_EXPR_LOCATION (critical, loc);
  OMP_CRITICAL_BODY (critical) = body;
  OMP_CRITICAL_NAME (critical) = name;
  OMP_CRITICAL_CLAUSES (critical) = clauses;
  return add_stmt (critical);
}
/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma, CLAUSES its clause list.  LOC is the location
   of the #pragma.  Returns the OMP_ORDERED node added to the current
   statement list.  */
tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  /* With -fopenmp-simd but not -fopenmp, anything other than a plain
     'ordered simd' is degraded to a bare simd clause.
     NOTE(review): OMP_CLAUSE_CODE (clauses) is read unconditionally here,
     so callers presumably never pass NULL_TREE clauses in that mode --
     confirm against the parser.  */
  if (!flag_openmp /* flag_openmp_simd */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
          || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  Emits a call to the GOMP_barrier builtin.  */
void
c_finish_omp_barrier (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  Emits a call to the GOMP_taskwait builtin.  */
void
c_finish_omp_taskwait (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  Emits a call to the GOMP_taskyield builtin.  */
void
c_finish_omp_taskyield (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.

   V, LHS1 and RHS1 are the extra operands of the capture forms;
   SWAPPED means the RHS was written as "rhs op lhs" in the source.
   MEMORY_ORDER selects the OpenMP memory-order clause semantics.  */
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped,
		     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early if any operand is already erroneous.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  /* A bit-field LHS is rewritten to operate on its representative field:
     BLHS remembers the original bit-field COMPONENT_REF, and BITPOS/BITSIZE
     locate the field within the representative.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      /* For a bit-field, extract the field from the representative read.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* For a bit-field, re-insert the new value into the representative.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Rewrite bit-field LHS1/RHS1 to their representatives, mirroring the
     rewrite done on LHS above so comparisons are apples-to-apples.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }
  /* Re-attach any side effects split off by build_modify_expr.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
/* Return true if TYPE is the implementation's omp_depend_t: a complete
   struct named omp_depend_t at translation-unit scope whose size is
   exactly two pointers.  */
bool
c_omp_depend_t_p (tree type)
{
  type = TYPE_MAIN_VARIANT (type);
  if (TREE_CODE (type) != RECORD_TYPE)
    return false;
  tree name = TYPE_NAME (type);
  if (name == NULL_TREE)
    return false;
  if (TREE_CODE (name) == TYPE_DECL)
    name = DECL_NAME (name);
  if (name != get_identifier ("omp_depend_t"))
    return false;
  /* Must be declared at file scope (or have no context at all).  */
  if (TYPE_CONTEXT (type)
      && TREE_CODE (TYPE_CONTEXT (type)) != TRANSLATION_UNIT_DECL)
    return false;
  if (!COMPLETE_TYPE_P (type)
      || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return false;
  /* compare_tree_int returns 0 on equality.  */
  return !compare_tree_int (TYPE_SIZE (type),
			    2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node)));
}
/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.  DEPOBJ is the depend object expression, KIND the dependence
   type requested (or the update/destroy kind when CLAUSE is null), and
   CLAUSE the single depend clause, error_mark_node on a parse error, or
   NULL_TREE when absent.  Emits statements that store the dependence
   address and kind into the two-pointer omp_depend_t object.  */
void
c_finish_omp_depobj (location_t loc, tree depobj,
		     enum omp_clause_depend_kind kind, tree clause)
{
  tree t = NULL_TREE;
  /* Validate the depobj expression: it must be a non-const omp_depend_t.  */
  if (!error_operand_p (depobj))
    {
      if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "type of %<depobj%> expression is not %<omp_depend_t%>");
	  depobj = error_mark_node;
	}
      else if (TYPE_READONLY (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<const%> qualified %<depobj%> expression");
	  depobj = error_mark_node;
	}
    }
  else
    depobj = error_mark_node;

  if (clause == error_mark_node)
    return;

  /* Validate the depend clause and extract the dependence address T.  */
  if (clause)
    {
      gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
		  && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_CHAIN (clause))
	error_at (OMP_CLAUSE_LOCATION (clause),
		  "more than one locator in %<depend%> clause on %<depobj%> "
		  "construct");
      switch (OMP_CLAUSE_DEPEND_KIND (clause))
	{
	case OMP_CLAUSE_DEPEND_DEPOBJ:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depobj%> dependence type specified in %<depend%> "
		    "clause on %<depobj%> construct");
	  return;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depend(%s)%> is only allowed in %<omp ordered%>",
		    OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
		    ? "source" : "sink");
	  return;
	case OMP_CLAUSE_DEPEND_IN:
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	  kind = OMP_CLAUSE_DEPEND_KIND (clause);
	  t = OMP_CLAUSE_DECL (clause);
	  gcc_assert (t);
	  /* A TREE_LIST with a TREE_VEC purpose encodes an iterator
	     modifier, which is not allowed here.  */
	  if (TREE_CODE (t) == TREE_LIST
	      && TREE_PURPOSE (t)
	      && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	    {
	      error_at (OMP_CLAUSE_LOCATION (clause),
			"%<iterator%> modifier may not be specified on "
			"%<depobj%> construct");
	      return;
	    }
	  if (TREE_CODE (t) == COMPOUND_EXPR)
	    {
	      tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
	      t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
			  t1);
	    }
	  else
	    t = build_fold_addr_expr (t);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);

  if (depobj == error_mark_node)
    return;

  /* Treat the depobj as an array of two pointers: slot 0 holds the
     dependence address, slot 1 the dependence kind.  */
  depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
  tree dtype
    = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
				   true);
  depobj = fold_convert (dtype, depobj);
  tree r;
  if (clause)
    {
      depobj = save_expr (depobj);
      r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
      add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
    }
  /* Map the clause dependence kind to the GOMP_DEPEND_* runtime value;
     -1 (OMP_CLAUSE_DEPEND_LAST) marks a 'destroy' update.  */
  int k;
  switch (kind)
    {
    case OMP_CLAUSE_DEPEND_IN:
      k = GOMP_DEPEND_IN;
      break;
    case OMP_CLAUSE_DEPEND_OUT:
      k = GOMP_DEPEND_OUT;
      break;
    case OMP_CLAUSE_DEPEND_INOUT:
      k = GOMP_DEPEND_INOUT;
      break;
    case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
      k = GOMP_DEPEND_MUTEXINOUTSET;
      break;
    case OMP_CLAUSE_DEPEND_LAST:
      k = -1;
      break;
    default:
      gcc_unreachable ();
    }
  t = build_int_cst (ptr_type_node, k);
  depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
		       TYPE_SIZE_UNIT (ptr_type_node));
  r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
}
/* Complete a #pragma omp flush construct. We don't do anything with
the variable list that the syntax allows. LOC is the location of
the #pragma. */
void
c_finish_omp_flush (location_t loc, int mo)
{
  /* When no memory order was specified (MEMMODEL_LAST), emit a full
     __sync_synchronize barrier; otherwise emit __atomic_thread_fence
     with the requested memory order MO.  */
  tree call;

  if (mo != MEMMODEL_LAST)
    {
      tree fence = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
      call = build_call_expr_loc (loc, fence, 1,
				  build_int_cst (integer_type_node, mo));
    }
  else
    {
      tree sync = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
      call = build_call_expr_loc (loc, sync, 0);
    }
  add_stmt (call);
}
/* Check and canonicalize OMP_FOR increment expression.
Helper function for c_finish_omp_for. */
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* The increment must be computed in a type at least as wide as DECL's,
     otherwise it cannot be rewritten as DECL = DECL + <step>.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Look through conversions, reapplying them to the canonicalized
	 sub-expression.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL - X: only the left operand may contain DECL.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL + X or X + DECL: try canonicalizing either operand.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		/* Substitute the TARGET_EXPR initializer back for the
		   temporary slot and retry on the rebuilt expression.  */
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  /* Any other form is not a recognized increment expression.  */
  return error_mark_node;
}
/* If the OMP_FOR increment expression in INCR is of pointer type,
canonicalize it into an expression handled by gimplify_omp_for()
and return it. DECL is the iteration variable. */
static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  /* Only pointer-typed iterators with an explicit step operand need
     rewriting; everything else is returned untouched.  */
  if (!POINTER_TYPE_P (TREE_TYPE (decl)) || !TREE_OPERAND (incr, 1))
    return incr;

  tree step = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1));
  enum tree_code incr_code = TREE_CODE (incr);
  if (incr_code == POSTDECREMENT_EXPR || incr_code == PREDECREMENT_EXPR)
    /* Decrements become addition of a negated offset.  */
    step = fold_build1_loc (loc, NEGATE_EXPR, sizetype, step);

  tree rhs = fold_build_pointer_plus (decl, step);
  return build2 (MODIFY_EXPR, void_type_node, decl, rhs);
}
/* Validate and generate OMP_FOR.
DECLV is a vector of iteration variables, for each collapsed loop.
ORIG_DECLV, if non-NULL, is a vector with the original iteration
variables (prior to any transformations, by say, C++ iterators).
INITV, CONDV and INCRV are vectors containing initialization
expressions, controlling predicates and increment expressions.
BODY is the body of the loop and PRE_BODY statements that go before
the loop. */
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body, bool final_p)
{
  location_t elocus;
  bool fail = false;
  int i;

  /* All per-loop vectors must describe the same collapse depth.  */
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable: must be integral or pointer.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      /* Use 0 as a dummy initializer so checking can continue.  */
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  /* Strip the promotion of DECL and instead convert the
		     other operand to DECL's type.  */
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so DECL ends up as the first operand,
		 swapping the comparison if needed.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* NE/EQ conditions are only valid in limited cases; rewrite
		 comparisons against TYPE_MIN/TYPE_MAX into inequalities
		 where possible.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
		    cond_ok = false;
		}

	      /* If COMPOUND_EXPRs were stripped above, reattach them to
		 the non-decl operand so their side effects survive.  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      if (!fail
		  && TREE_CODE (cond) == NE_EXPR
		  && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		  && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
		  && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		      != INTEGER_CST))
		{
		  /* For pointer to VLA, transform != into < or >
		     depending on whether incr is increment or decrement.  */
		  if (TREE_CODE (incr) == PREINCREMENT_EXPR
		      || TREE_CODE (incr) == POSTINCREMENT_EXPR)
		    TREE_SET_CODE (cond, LT_EXPR);
		  else
		    TREE_SET_CODE (cond, GT_EXPR);
		}
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Try to canonicalize an arbitrary RHS into
		     DECL = DECL + <step>.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      if (!fail
		  && incr_ok
		  && TREE_CODE (cond) == NE_EXPR)
		{
		  /* For a != condition the step must be exactly 1 or -1
		     (or exactly one element for pointers).  */
		  tree i = TREE_OPERAND (incr, 1);
		  i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
		  i = c_fully_fold (i, false, NULL);
		  if (!final_p
		      && TREE_CODE (i) != INTEGER_CST)
		    ;
		  else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
		    {
		      tree unit
			= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
		      if (unit)
			{
			  enum tree_code ccode = GT_EXPR;
			  unit = c_fully_fold (unit, false, NULL);
			  i = fold_convert (TREE_TYPE (unit), i);
			  if (operand_equal_p (unit, i, 0))
			    ccode = LT_EXPR;
			  if (ccode == GT_EXPR)
			    {
			      i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
			      if (i == NULL_TREE
				  || !operand_equal_p (unit, i, 0))
				{
				  error_at (elocus,
					    "increment is not constant 1 or "
					    "-1 for %<!=%> condition");
				  fail = true;
				}
			    }
			  if (TREE_CODE (unit) != INTEGER_CST)
			    /* For pointer to VLA, transform != into < or >
			       depending on whether the pointer is
			       incremented or decremented in each
			       iteration.  */
			    TREE_SET_CODE (cond, ccode);
			}
		    }
		  else
		    {
		      if (!integer_onep (i) && !integer_minus_onep (i))
			{
			  error_at (elocus,
				    "increment is not constant 1 or -1 for"
				    " %<!=%> condition");
			  fail = true;
			}
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the possibly-canonicalized expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
/* Type for passing data in between c_omp_check_loop_iv and
c_omp_check_loop_iv_r. */
struct c_omp_check_loop_iv_data
{
  tree declv;			/* TREE_VEC of associated-loop iteration
				   variables being checked against.  */
  bool fail;			/* Set once a diagnostic has been emitted.  */
  location_t stmt_loc;		/* Location of the whole OpenMP statement;
				   fallback when expr_loc is unknown.  */
  location_t expr_loc;		/* Location of the expression being walked.  */
  int kind;			/* Which expression is being walked:
				   0 = init, 1 = cond, 2 = incr.  */
  walk_tree_lh lh;		/* Language hook forwarded to walk_tree_1.  */
  hash_set<tree> *ppset;	/* Visited-node set shared across walks.  */
};
/* Helper function called via walk_tree, to diagnose uses
of associated loop IVs inside of lb, b and incr expressions
of OpenMP loops. */
static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      int i;
      /* Does *TP name one of the iteration variables?  Each declv slot is
	 either the decl itself, a TREE_LIST whose TREE_PURPOSE is the decl,
	 or a TREE_LIST whose TREE_CHAIN is a TREE_VEC holding the decl at
	 index 2.  */
      for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
	if (*tp == TREE_VEC_ELT (d->declv, i)
	    || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
		&& *tp == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i)))
	    || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
		&& TREE_CHAIN (TREE_VEC_ELT (d->declv, i))
		&& (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)))
		    == TREE_VEC)
		&& *tp == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv,
								  i)), 2)))
	  {
	    /* Prefer the expression's own location; fall back to the
	       statement's location.  */
	    location_t loc = d->expr_loc;
	    if (loc == UNKNOWN_LOCATION)
	      loc = d->stmt_loc;
	    switch (d->kind)
	      {
	      case 0:
		error_at (loc, "initializer expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 1:
		error_at (loc, "condition expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 2:
		error_at (loc, "increment expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      }
	    d->fail = true;
	  }
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  Only the protected
     body (operand 0) is walked, not the cleanup.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}
/* Diagnose invalid references to loop iterators in lb, b and incr
expressions. */
bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  /* Walk init RHS, cond RHS and incr step of each associated loop,
     diagnosing any reference to an associated iteration variable.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i)
	  || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
	      && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
	{
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Walk only the step operand, i.e. the one that is not DECL.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}
/* Similar, but allows to check the init or cond expressions individually. */
bool
c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
			   tree init, tree cond, walk_tree_lh lh)
{
  /* Check INIT and/or COND individually for references to any of the
     iteration variables in DECLV, diagnosing via c_omp_check_loop_iv_r.
     STMT_LOC is the statement location fallback; DECL is the iteration
     variable COND controls.  Returns true if no invalid reference was
     found.  */
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = stmt_loc;
  data.lh = lh;
  data.ppset = &pset;
  if (init)
    {
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 0;
      walk_tree_1 (&init,
		   c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  if (cond)
    {
      gcc_assert (COMPARISON_CLASS_P (cond));
      /* Use COND's own location for the diagnostics.  The previous code
	 used EXPR_LOCATION (init) here, a copy-and-paste error that gave
	 condition diagnostics the initializer's location (or
	 UNKNOWN_LOCATION when COND is checked without INIT).  */
      data.expr_loc = EXPR_LOCATION (cond);
      data.kind = 1;
      /* Walk the operand that is not DECL.  */
      if (TREE_OPERAND (cond, 0) == decl)
	walk_tree_1 (&TREE_OPERAND (cond, 1),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
      else
	walk_tree_1 (&TREE_OPERAND (cond, 0),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  return !data.fail;
}
/* This function splits clauses for OpenACC combined loop
constructs. OpenACC combined loop constructs are:
#pragma acc kernels loop
#pragma acc parallel loop */
tree
c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
			   bool is_parallel)
{
  /* Distribute each clause in CLAUSES onto either the OpenACC loop
     construct (return value) or the enclosing parallel/kernels construct
     (*NOT_LOOP_CLAUSES).  Both result lists come out in reverse order of
     the input, as before.  */
  tree loop_clauses = NULL_TREE;
  *not_loop_clauses = NULL_TREE;

  while (clauses)
    {
      tree c = clauses;
      clauses = OMP_CLAUSE_CHAIN (c);

      bool for_loop;
      switch (OMP_CLAUSE_CODE (c))
	{
	/* Clauses that belong on the loop construct.  */
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  for_loop = true;
	  break;
	/* Everything else goes on the parallel/kernels construct.  */
	default:
	  for_loop = false;
	  break;
	}

      /* Reductions must be duplicated on both constructs when combined
	 with parallel.  */
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && is_parallel)
	{
	  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_REDUCTION);
	  OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (c);
	  OMP_CLAUSE_REDUCTION_CODE (nc) = OMP_CLAUSE_REDUCTION_CODE (c);
	  OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
	  *not_loop_clauses = nc;
	}

      if (for_loop)
	{
	  OMP_CLAUSE_CHAIN (c) = loop_clauses;
	  loop_clauses = c;
	}
      else
	{
	  OMP_CLAUSE_CHAIN (c) = *not_loop_clauses;
	  *not_loop_clauses = c;
	}
    }

  return loop_clauses;
}
/* This function attempts to split or duplicate clauses for OpenMP
combined/composite constructs. Right now there are 30 different
constructs. CODE is the innermost construct in the combined construct,
and MASK allows to determine which constructs are combined together,
as every construct has at least one clause that no other construct
has (except for OMP_SECTIONS, but that can be only combined with parallel,
and OMP_MASTER, which doesn't have any clauses at all).
OpenMP combined/composite constructs are:
#pragma omp distribute parallel for
#pragma omp distribute parallel for simd
#pragma omp distribute simd
#pragma omp for simd
#pragma omp master taskloop
#pragma omp master taskloop simd
#pragma omp parallel for
#pragma omp parallel for simd
#pragma omp parallel loop
#pragma omp parallel master
#pragma omp parallel master taskloop
#pragma omp parallel master taskloop simd
#pragma omp parallel sections
#pragma omp target parallel
#pragma omp target parallel for
#pragma omp target parallel for simd
#pragma omp target parallel loop
#pragma omp target teams
#pragma omp target teams distribute
#pragma omp target teams distribute parallel for
#pragma omp target teams distribute parallel for simd
#pragma omp target teams distribute simd
#pragma omp target teams loop
#pragma omp target simd
#pragma omp taskloop simd
#pragma omp teams distribute
#pragma omp teams distribute parallel for
#pragma omp teams distribute parallel for simd
#pragma omp teams distribute simd
#pragma omp teams loop */
void
c_omp_split_clauses (location_t loc, enum tree_code code,
omp_clause_mask mask, tree clauses, tree *cclauses)
{
tree next, c;
enum c_omp_clause_split s;
int i;
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
cclauses[i] = NULL;
/* Add implicit nowait clause on
#pragma omp parallel {for,for simd,sections}. */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
switch (code)
{
case OMP_FOR:
case OMP_SIMD:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
cclauses[C_OMP_CLAUSE_SPLIT_FOR]
= build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
break;
case OMP_SECTIONS:
cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
= build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
break;
default:
break;
}
for (; clauses ; clauses = next)
{
next = OMP_CLAUSE_CHAIN (clauses);
switch (OMP_CLAUSE_CODE (clauses))
{
/* First the clauses that are unique to some constructs. */
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_DEPEND:
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
case OMP_CLAUSE_DIST_SCHEDULE:
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_PROC_BIND:
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
case OMP_CLAUSE_ORDERED:
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
case OMP_CLAUSE_SCHEDULE:
s = C_OMP_CLAUSE_SPLIT_FOR;
if (code != OMP_SIMD)
OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
break;
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_NONTEMPORAL:
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_PRIORITY:
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
case OMP_CLAUSE_BIND:
s = C_OMP_CLAUSE_SPLIT_LOOP;
break;
/* Duplicate this to all of taskloop, distribute, for, simd and
loop. */
case OMP_CLAUSE_COLLAPSE:
if (code == OMP_SIMD)
{
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_COLLAPSE);
OMP_CLAUSE_COLLAPSE_EXPR (c)
= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
else
{
/* This must be #pragma omp target simd */
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
}
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_COLLAPSE);
OMP_CLAUSE_COLLAPSE_EXPR (c)
= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
}
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else if (code == OMP_LOOP)
s = C_OMP_CLAUSE_SPLIT_LOOP;
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
/* Private clause is supported on all constructs but master,
it is enough to put it on the innermost one other than master. For
#pragma omp {for,sections} put it on parallel though,
as that's what we did for OpenMP 3.1. */
case OMP_CLAUSE_PRIVATE:
switch (code)
{
case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
case OMP_FOR: case OMP_SECTIONS:
case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
default: gcc_unreachable ();
}
break;
/* Firstprivate clause is supported on all constructs but
simd, master and loop. Put it on the outermost of those and
duplicate on teams and parallel. */
case OMP_CLAUSE_FIRSTPRIVATE:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
!= 0)
{
if (code == OMP_SIMD
&& (mask & ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
{
/* This must be #pragma omp target simd. */
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
}
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
{
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
s = C_OMP_CLAUSE_SPLIT_TEAMS;
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
/* This must be
#pragma omp parallel master taskloop{, simd}. */
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else
/* This must be
#pragma omp parallel{, for{, simd}, sections,loop}
or
#pragma omp target parallel. */
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
/* This must be one of
#pragma omp {,target }teams {distribute,loop}
#pragma omp target teams
#pragma omp {,target }teams distribute simd. */
gcc_assert (code == OMP_DISTRIBUTE
|| code == OMP_LOOP
|| code == OMP_TEAMS
|| code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_TEAMS;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
{
/* This must be #pragma omp distribute simd. */
gcc_assert (code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
{
/* This must be #pragma omp {,{,parallel }master }taskloop simd
or
#pragma omp {,parallel }master taskloop. */
gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
}
else
{
/* This must be #pragma omp for simd. */
gcc_assert (code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_FOR;
}
break;
/* Lastprivate is allowed on distribute, for, sections, taskloop, loop
and simd. In parallel {for{, simd},sections} we actually want to
put it on parallel rather than for or sections. */
case OMP_CLAUSE_LASTPRIVATE:
if (code == OMP_DISTRIBUTE)
{
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
}
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
}
if (code == OMP_FOR || code == OMP_SECTIONS)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
}
if (code == OMP_TASKLOOP)
{
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
}
if (code == OMP_LOOP)
{
s = C_OMP_CLAUSE_SPLIT_LOOP;
break;
}
gcc_assert (code == OMP_SIMD);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
OMP_CLAUSE_CHAIN (c) = cclauses[s];
cclauses[s] = c;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
}
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
/* Shared and default clauses are allowed on parallel, teams and
taskloop. */
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_DEFAULT:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_CODE (clauses));
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
else
OMP_CLAUSE_DEFAULT_KIND (c)
= OMP_CLAUSE_DEFAULT_KIND (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
}
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
== 0)
{
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
}
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_CODE (clauses));
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
else
OMP_CLAUSE_DEFAULT_KIND (c)
= OMP_CLAUSE_DEFAULT_KIND (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
}
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
/* order clauses are allowed on for, simd and loop. */
case OMP_CLAUSE_ORDER:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
if (code == OMP_SIMD)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_ORDER);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
s = C_OMP_CLAUSE_SPLIT_SIMD;
}
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
else if (code == OMP_LOOP)
s = C_OMP_CLAUSE_SPLIT_LOOP;
else
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
/* Reduction is allowed on simd, for, parallel, sections, taskloop,
teams and loop. Duplicate it on all of them, but omit on for or
sections if parallel is present (unless inscan, in that case
omit on parallel). If taskloop or loop is combined with
parallel, omit it on parallel. */
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_TASK (clauses))
{
if (code == OMP_SIMD || code == OMP_LOOP)
{
error_at (OMP_CLAUSE_LOCATION (clauses),
"invalid %<task%> reduction modifier on construct "
"combined with %<simd%> or %<loop%>");
OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
}
else if (code != OMP_SECTIONS
&& (mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
&& (mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
{
error_at (OMP_CLAUSE_LOCATION (clauses),
"invalid %<task%> reduction modifier on construct "
"not combined with %<parallel%>, %<for%> or "
"%<sections%>");
OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
}
}
if (OMP_CLAUSE_REDUCTION_INSCAN (clauses)
&& ((mask & ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)))
!= 0))
{
error_at (OMP_CLAUSE_LOCATION (clauses),
"%<inscan%> %<reduction%> clause on construct other "
"than %<for%>, %<simd%>, %<for simd%>, "
"%<parallel for%>, %<parallel for simd%>");
OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
if (code == OMP_SIMD)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_INSCAN (c)
= OMP_CLAUSE_REDUCTION_INSCAN (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_INSCAN (c)
= OMP_CLAUSE_REDUCTION_INSCAN (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0
&& !OMP_CLAUSE_REDUCTION_INSCAN (clauses))
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
else if (code == OMP_SECTIONS
|| code == OMP_PARALLEL
|| code == OMP_MASTER)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else if (code == OMP_TASKLOOP)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else if (code == OMP_LOOP)
s = C_OMP_CLAUSE_SPLIT_LOOP;
else if (code == OMP_SIMD)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_INSCAN (c)
= OMP_CLAUSE_REDUCTION_INSCAN (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
}
s = C_OMP_CLAUSE_SPLIT_SIMD;
}
else
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
case OMP_CLAUSE_IN_REDUCTION:
/* in_reduction on taskloop simd becomes reduction on the simd
and keeps being in_reduction on taskloop. */
if (code == OMP_SIMD)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
case OMP_CLAUSE_IF:
if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
{
s = C_OMP_CLAUSE_SPLIT_COUNT;
switch (OMP_CLAUSE_IF_MODIFIER (clauses))
{
case OMP_PARALLEL:
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
case OMP_SIMD:
if (code == OMP_SIMD)
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
case OMP_TASKLOOP:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
case OMP_TARGET:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
!= 0)
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
default:
break;
}
if (s != C_OMP_CLAUSE_SPLIT_COUNT)
break;
/* Error-recovery here, invalid if-modifier specified, add the
clause to just one construct. */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
s = C_OMP_CLAUSE_SPLIT_TARGET;
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else if (code == OMP_SIMD)
s = C_OMP_CLAUSE_SPLIT_SIMD;
else
gcc_unreachable ();
break;
}
/* Otherwise, duplicate if clause to all constructs. */
if (code == OMP_SIMD)
{
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_IF);
OMP_CLAUSE_IF_MODIFIER (c)
= OMP_CLAUSE_IF_MODIFIER (clauses);
OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
else
{
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
}
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
{
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_IF);
OMP_CLAUSE_IF_MODIFIER (c)
= OMP_CLAUSE_IF_MODIFIER (clauses);
OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
{
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_MAP)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_IF);
OMP_CLAUSE_IF_MODIFIER (c)
= OMP_CLAUSE_IF_MODIFIER (clauses);
OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
case OMP_CLAUSE_LINEAR:
/* Linear clause is allowed on simd and for. Put it on the
innermost construct. */
if (code == OMP_SIMD)
s = C_OMP_CLAUSE_SPLIT_SIMD;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
case OMP_CLAUSE_NOWAIT:
/* Nowait clause is allowed on target, for and sections, but
is not allowed on parallel for or parallel sections. Therefore,
put it on target construct if present, because that can only
be combined with parallel for{, simd} and not with for{, simd},
otherwise to the worksharing construct. */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
!= 0)
s = C_OMP_CLAUSE_SPLIT_TARGET;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
default:
gcc_unreachable ();
}
OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
cclauses[s] = clauses;
}
if (!flag_checking)
return;
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
&& code != OMP_SECTIONS
&& code != OMP_LOOP)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
if (code != OMP_SIMD)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
/* qsort callback to compare #pragma omp declare simd clauses. */
static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
tree a = *(const tree *) p;
tree b = *(const tree *) q;
if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
{
if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
return -1;
return 1;
}
if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
&& OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
&& OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
{
int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
if (c < d)
return 1;
if (c > d)
return -1;
}
return 0;
}
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.
   Returns the rebuilt (sorted) clause chain; clauses naming something
   that is not a function argument are diagnosed and dropped.  */
tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      /* simdlen/inbranch/notinbranch carry no argument reference;
         they are kept as-is.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  /* Find the 0-based position of DECL in the PARMS chain.  */
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      /* Not a parameter: diagnose and skip the safe_push below,
	         which drops the clause from the result.  */
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	  /* A linear clause with a variable stride also names an
	     argument as its step; encode that as an index too.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	    {
	      decl = OMP_CLAUSE_LINEAR_STEP (c);
	      for (arg = parms, idx = 0; arg;
		   arg = TREE_CHAIN (arg), idx++)
		if (arg == decl)
		  break;
	      if (arg == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qD is not a function argument", decl);
		  continue;
		}
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (integer_type_node, idx);
	    }
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      /* Sort, then rebuild the OMP_CLAUSE_CHAIN links from the
         sorted vector.  */
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}
/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.
   Inverse of c_omp_declare_simd_clauses_to_numbers: each stored index
   is replaced by the corresponding entry of DECL_ARGUMENTS (FNDECL).  */
void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    /* simdlen/inbranch/notinbranch do not reference an argument.  */
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
	/* Walk to the idx-th PARM_DECL; the index was validated when it
	   was stored, hence the assert rather than a diagnostic.  */
	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
	tree arg;
	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
	     arg = TREE_CHAIN (arg), i++)
	  if (i == idx)
	    break;
	gcc_assert (arg);
	OMP_CLAUSE_DECL (c) = arg;
	/* A linear clause may also encode its step as an argument index.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	  {
	    idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
	    for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
		 arg = TREE_CHAIN (arg), i++)
	      if (i == idx)
		break;
	    gcc_assert (arg);
	    OMP_CLAUSE_LINEAR_STEP (c) = arg;
	  }
      }
}
/* Return true for __func__ and similar function-local predefined
   variables (which are in OpenMP predetermined shared, allowed in
   shared/firstprivate clauses).  */
bool
c_omp_predefined_variable (tree decl)
{
  /* Guard clauses: must be an artificial, read-only, static VAR_DECL
     that actually has a name.  */
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !TREE_READONLY (decl)
      || !TREE_STATIC (decl)
      || !DECL_NAME (decl))
    return false;
  /* Only the __func__ / __FUNCTION__ / __PRETTY_FUNCTION__
     identifiers qualify.  */
  tree name = DECL_NAME (decl);
  return (name == ridpointers[RID_C99_FUNCTION_NAME]
	  || name == ridpointers[RID_FUNCTION_NAME]
	  || name == ridpointers[RID_PRETTY_FUNCTION_NAME]);
}
/* True if OpenMP sharing attribute of DECL is predetermined.  */
enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Artificial variables holding integral values are predetermined
     shared; those are usually result of gimplify_one_sizepos or
     SAVE_EXPR gimplification.  Likewise __func__ and friends.  */
  bool artificial_integral = (VAR_P (decl)
			      && DECL_ARTIFICIAL (decl)
			      && INTEGRAL_TYPE_P (TREE_TYPE (decl)));
  if (artificial_integral || c_omp_predefined_variable (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;
  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
/* Diagnose errors in an OpenMP context selector, return CTX if
   it is correct or error_mark_node otherwise.
   CTX is a TREE_LIST of selector sets; each set's TREE_VALUE is a
   TREE_LIST of selectors, whose TREE_VALUEs are property lists.  */
tree
c_omp_check_context_selector (location_t loc, tree ctx)
{
  /* Each trait-set-selector-name can only be specified once.
     There are just 4 set names.  */
  for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
	{
	  error_at (loc, "selector set %qs specified more than once",
		    IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
	  return error_mark_node;
	}
  for (tree t = ctx; t; t = TREE_CHAIN (t))
    {
      /* Each trait-selector-name can only be specified once.  */
      if (list_length (TREE_VALUE (t)) < 5)
	{
	  /* Short list: a quadratic pairwise scan is cheap.  */
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
	      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
		{
		  error_at (loc,
			    "selector %qs specified more than once in set %qs",
			    IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			    IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		  return error_mark_node;
		}
	}
      else
	{
	  /* Longer list: use a hash set for the duplicate check.  */
	  hash_set<tree> pset;
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    if (pset.add (TREE_PURPOSE (t1)))
	      {
		error_at (loc,
			  "selector %qs specified more than once in set %qs",
			  IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			  IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		return error_mark_node;
	      }
	}
      /* NULL-terminated tables of property names known for particular
	 (set, selector) pairs.  */
      static const char *const kind[] = {
	"host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
      static const char *const vendor[] = {
	"amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
	"llvm", "nvidia", "pgi", "ti", "unknown", NULL };
      static const char *const extension[] = { NULL };
      static const char *const atomic_default_mem_order[] = {
	"seq_cst", "relaxed", "acq_rel", NULL };
      struct known_properties { const char *set; const char *selector;
				const char *const *props; };
      known_properties props[] = {
	{ "device", "kind", kind },
	{ "implementation", "vendor", vendor },
	{ "implementation", "extension", extension },
	{ "implementation", "atomic_default_mem_order",
	  atomic_default_mem_order } };
      /* Check every property of every selector in this set against the
	 matching table, if there is one.  */
      for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
	  if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
					   props[i].selector)
	      && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
			  props[i].set))
	    for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
	      for (unsigned j = 0; ; j++)
		{
		  if (props[i].props[j] == NULL)
		    {
		      /* Ran off the table without a match.  A " score"
			 entry is internal bookkeeping, not a property.  */
		      if (TREE_PURPOSE (t2)
			  && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				      " score"))
			break;
		      /* atomic_default_mem_order has a closed set of
			 values: unknown ones are hard errors; elsewhere
			 unknown properties only warn.  */
		      if (props[i].props == atomic_default_mem_order)
			{
			  error_at (loc,
				    "incorrect property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    "atomic_default_mem_order");
			  return error_mark_node;
			}
		      else if (TREE_PURPOSE (t2))
			warning_at (loc, 0,
				    "unknown property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].selector);
		      else
			warning_at (loc, 0,
				    "unknown property %qE of %qs selector",
				    TREE_VALUE (t2), props[i].selector);
		      break;
		    }
		  else if (TREE_PURPOSE (t2) == NULL_TREE)
		    {
		      /* String-literal property: compare contents and
			 length (guards against embedded NULs).  */
		      const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
		      if (!strcmp (str, props[i].props[j])
			  && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
			      == strlen (str) + 1))
			break;
		    }
		  else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].props[j]))
		    break;
		}
    }
  return ctx;
}
/* Register VARIANT as variant of some base function marked with
   #pragma omp declare variant.  CONSTRUCT is corresponding construct
   selector set.  */
void
c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
{
  tree attr = lookup_attribute ("omp declare variant variant",
				DECL_ATTRIBUTES (variant));
  /* First registration: record the construct selector set on the
     variant via an internal attribute.  */
  if (attr == NULL_TREE)
    {
      attr = tree_cons (get_identifier ("omp declare variant variant"),
			unshare_expr (construct),
			DECL_ATTRIBUTES (variant));
      DECL_ATTRIBUTES (variant) = attr;
      return;
    }
  /* Subsequent registrations must agree with the recorded set: either
     both are NULL, or they compare equal as selector sets.  */
  if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
      || (construct != NULL_TREE
	  && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
					       construct)))
    error_at (loc, "%qD used as a variant with incompatible %<construct%> "
		   "selector sets", variant);
}
/* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
   to distinguish clauses as seen by the user.  Return the "friendly" clause
   name for error messages etc., where possible.  See also
   c/c-parser.c:c_parser_oacc_data_clause and
   cp/parser.c:cp_parser_oacc_data_clause.  */
const char *
c_omp_map_clause_name (tree clause, bool oacc)
{
  /* Only OpenACC map clauses get the user-facing spelling; anything
     else falls through to the generic clause-code name.  */
  if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP)
    switch (OMP_CLAUSE_MAP_KIND (clause))
      {
      /* Each pair of FORCE_/plain kinds maps to a single user-visible
	 OpenACC clause name.  */
      case GOMP_MAP_FORCE_ALLOC:
      case GOMP_MAP_ALLOC: return "create";
      case GOMP_MAP_FORCE_TO:
      case GOMP_MAP_TO: return "copyin";
      case GOMP_MAP_FORCE_FROM:
      case GOMP_MAP_FROM: return "copyout";
      case GOMP_MAP_FORCE_TOFROM:
      case GOMP_MAP_TOFROM: return "copy";
      case GOMP_MAP_RELEASE: return "delete";
      case GOMP_MAP_FORCE_PRESENT: return "present";
      case GOMP_MAP_ATTACH: return "attach";
      case GOMP_MAP_FORCE_DETACH:
      case GOMP_MAP_DETACH: return "detach";
      case GOMP_MAP_DEVICE_RESIDENT: return "device_resident";
      case GOMP_MAP_LINK: return "link";
      case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr";
      /* Unhandled kinds fall back to the generic name below.  */
      default: break;
      }
  return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h> /* omp_get_max_threads() is called under _OPENMP below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result
 * in RESULT.  Returns 1 if the difference is negative, otherwise 0.
 *
 * NOTE: Y is normalized in place (same as the GNU libc manual's
 * example implementation), so the caller's Y may be modified.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds out of an oversized microsecond gap. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x predates y after normalization. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: parses grid dimensions (argv[1..3]) and time steps (argv[4]),
 * allocates the two time planes and 7 coefficient arrays, runs the
 * stencil TESTS times and reports per-test wall-clock timings.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Fixed: provide defaults so the sizes are never read uninitialized
   * when the command line omits them; +2 accounts for the halo layer
   * on each side. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 50;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time planes, coef the 7 weights
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  srand(42);
  /* Fixed: initialize from index 0 (not 1) and BOTH time planes.  The
   * stencil reads the i-1/j-1/k-1 boundary planes and, after the first
   * step, the boundary of the other plane; these were previously left
   * uninitialized (undefined behavior). */
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return; /* end >= start here, so the sign flag is unused */
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fixed: the macro defined above is MIN, not min (did not compile). */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the top-level pointers and the
  // tile-size list (all three previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
8152.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization: fill A with the deterministic pattern
   (i + j) / nj so runs are reproducible. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;
  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints B to stderr, inserting a newline every 20 elements. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* line break keyed to the flattened index, per polybench style */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes a 3x3 convolution of A into B over the interior points. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      /* Fixed: 'schedule' is not a valid clause on 'teams distribute'
	 (only dist_schedule is); the combined construct
	 'teams distribute parallel for' does accept schedule(dynamic),
	 preserving the author's intent.  NOTE(review): A and B rely on
	 the target region's implicit mapping — confirm the polybench
	 array types map correctly on the offload target. */
#pragma omp target teams distribute parallel for schedule(dynamic, 14)
      for (j = 1; j < _PB_NJ - 1; ++j)
	{
	  B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	    + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	    + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
	}
    }
#pragma endscop
}
/* Benchmark driver: initialize A, time kernel_conv2d, print B via the
   DCE guard so the kernel is not optimized away. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
GB_binop__isge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_fp64)
// A*D function (colscale): GB (_AxD__isge_fp64)
// D*A function (rowscale): GB (_DxB__isge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_fp64)
// C=scalar+B GB (_bind1st__isge_fp64)
// C=scalar+B' GB (_bind1st_tran__isge_fp64)
// C=A+scalar GB (_bind2nd__isge_fp64)
// C=A'+scalar GB (_bind2nd_tran__isge_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
// Type and operator plumbing consumed by the template files included below.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out for ISGE: this variant exists only for the monoid-style
// accumulators listed below.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// All the work is done by the included template, specialized by the
// GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B is sliced into B_ntasks tasks (B_ek_slicing) over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: kept by the generator; the block above always returns
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the output value array consumed by the colscale template
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the output value array consumed by the rowscale template
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces the template may allocate for slicing M, A, and B;
    // GB_FREE_WORK releases them on the way out
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISGE, so this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x >= Bx [p]) for every entry present per Bb.
GrB_Info GB (_bind1st__isge_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    const double xval = (*((double *) x_input)) ;
    const double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries that are absent according to the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        Cx [p] = (xval >= Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) : apply the isge_fp64 operator with the scalar y
// bound to the second argument.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__isge_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // untyped pointers carry fp64 data; reinterpret them up front
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y   = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, k)) continue ;
        double akj = Ax [k] ;
        Cx [k] = (akj >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}
// C = op (x, A') : transpose A and apply isge_fp64 with x bound to the
// first argument; the transpose machinery lives in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent generated kernels (dead at runtime:
    // both branches above have already returned)
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}
// C = op (A', y) : transpose A and apply isge_fp64 with y bound to the
// second argument; the transpose machinery lives in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
imd_main_mpi_2d.c |
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2011 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/
/******************************************************************************
*
* imd_main_mpi_2d.c -- main loop, MPI specific part, two dimensions
*
******************************************************************************/
/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/
#include "imd.h"
/******************************************************************************
*
* calc_forces
*
* The forces of the atoms are calulated here. To achive this, atoms on
* the surface of a cpu are exchanged with the neigbours.
*
* The force calculation is split into those steps:
*
* i) send atoms positions of cells on surface neighbours,
* receive atom positions from neigbours
* ii) zero forces on all cells (local and buffer)
* iii) calculate forces in local cells, use lower half of neigbours
* for each cell and use actio==reactio
* iv) calculate forces also for upper half of neighbours for all cells
* that are on the upper surface
*
******************************************************************************/
/* Compute forces on all atoms for this MD step: exchange surface cells with
   neighbour CPUs, zero per-atom accumulators, run the pairwise force loops,
   and MPI-reduce the global scalars (potential energy, virial components). */
void calc_forces(int steps)
{
  int n, k;
  /* tmpvec1 holds the 5 scalars reduced below; tmpvec2 needs 8 slots because
     it doubles as the dummy accumulator for the 8-pointer do_forces call in
     the non-AR loop before being overwritten by MPI_Allreduce */
  real tmpvec1[5], tmpvec2[8] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

  /* fill the buffer cells */
  if ((steps == steps_min) || (0 == steps % BUFSTEP)) setup_buffers();
  send_cells(copy_cell,pack_cell,unpack_cell);

  /* clear global accumulation variables */
  tot_pot_energy = 0.0;
  virial = 0.0;
  vir_xx = 0.0;
  vir_yy = 0.0;
  vir_xy = 0.0;
  nfc++;

  /* clear per atom accumulation variables (forces, energies, stresses) */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (k=0; k<nallcells; ++k) {
    int i;
    cell *p;
    p = cell_array + k;
    for (i=0; i<p->n; ++i) {
      KRAFT(p,i,X)  = 0.0;
      KRAFT(p,i,Y)  = 0.0;
      POTENG(p,i)   = 0.0;
#ifdef NNBR
      NBANZ(p,i)    = 0;
#endif
#if defined(STRESS_TENS)
      PRESSTENS(p,i,xx) = 0.0;
      PRESSTENS(p,i,yy) = 0.0;
      PRESSTENS(p,i,xy) = 0.0;
#endif
    }
  }

#ifdef RIGID
  /* clear total forces */
  if ( nsuperatoms>0 )
    for(k=0; k<nsuperatoms; k++) {
      superforce[k].x = 0.0;
      superforce[k].y = 0.0;
    }
#endif

  /* What follows is the standard one-cpu force
     loop acting on our local data cells */

  /* compute forces for all pairs of cells */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
  reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_xy)
#endif
    for (k=0; k<npairs[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n] + k;
      /* periodic-image shift of the partner cell in real space */
      pbc.x = P->ipbc[0] * box_x.x + P->ipbc[1] * box_y.x;
      pbc.y = P->ipbc[0] * box_x.y + P->ipbc[1] * box_y.y;
      /* NOTE(review): this 2D build passes the 3D components vir_zz, vir_yz,
         vir_zx to do_forces — presumably globals declared elsewhere so the
         8-pointer signature matches; confirm against do_forces() */
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                &tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
                &vir_yz, &vir_zx, &vir_xy);
    }
  }

#ifndef AR
  /* If we don't use actio=reactio accross the cpus, we have do do
     the force loop also on the other half of the neighbours for the
     cells on the surface of the CPU */

  /* compute forces for remaining pairs of cells */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime)
#endif
    for (k=npairs[n]; k<npairs2[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n] + k;
      pbc.x = P->ipbc[0] * box_x.x + P->ipbc[1] * box_y.x;
      pbc.y = P->ipbc[0] * box_x.y + P->ipbc[1] * box_y.y;
      /* potential energy and virial are already complete; */
      /* to avoid double counting, we update only the dummy tmpvec2 */
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                tmpvec2, tmpvec2+1, tmpvec2+2, tmpvec2+3, tmpvec2+4,
                tmpvec2+5, tmpvec2+6, tmpvec2+7);
    }
  }
#endif /* AR */

  /* sum up results of different CPUs (tmpvec2 is overwritten here, so any
     dummy accumulation above is discarded as intended) */
  tmpvec1[0] = tot_pot_energy;
  tmpvec1[1] = virial;
  tmpvec1[2] = vir_xx;
  tmpvec1[3] = vir_yy;
  tmpvec1[4] = vir_xy;
  MPI_Allreduce( tmpvec1, tmpvec2, 5, REAL, MPI_SUM, cpugrid);
  tot_pot_energy = tmpvec2[0];
  virial         = tmpvec2[1];
  vir_xx         = tmpvec2[2];
  vir_yy         = tmpvec2[3];
  vir_xy         = tmpvec2[4];

#ifdef AR
  /* with actio=reactio, forces accumulated in buffer cells must be
     sent back and added to the owning CPU's cells */
  send_forces(add_forces,pack_forces,unpack_forces);
#endif
}
/******************************************************************************
*
* fix_cells
*
* check if each atom is in the correct cell and on the correct CPU;
* move atoms that have left their cell or CPU
*
******************************************************************************/
/* Re-sort atoms after a move: apply periodic boundaries, then move each atom
   to its correct cell, or copy it into the send buffer for the CPU that now
   owns it.  Finishes by exchanging the buffers with neighbour CPUs. */
void fix_cells(void)
{
  int i,j,l,clone;
  cell *p, *q;
  ivektor coord, lcoord, dcpu, to_coord;
  msgbuf *buf;

  empty_mpi_buffers();

  /* apply periodic boundary conditions */
  do_boundaries();

  /* for each cell in bulk */
  for (i=cellmin.x; i < cellmax.x; ++i)
    for (j=cellmin.y; j < cellmax.y; ++j) {
      p = PTR_2D_V(cell_array, i, j, cell_dim);

      /* loop over atoms in cell; l is only advanced when the atom stays,
         because MOVE_ATOM / copy_one_atom compact the cell in place */
      l=0;
      while( l < p->n ) {
        coord  = cell_coord( ORT(p,l,X), ORT(p,l,Y) );
        lcoord = local_cell_coord( coord );
        /* see if atom is in wrong cell */
        if ((lcoord.x == i) && (lcoord.y == j)) {
          l++;
        } else {
          /* Calculate distance on CPU grid */
          to_coord = cpu_coord_v( coord );
          dcpu.x = to_coord.x - my_coord.x;
          dcpu.y = to_coord.y - my_coord.y;

          /* Consider PBC: wrap the CPU-grid distance to the nearest image */
          if (pbc_dirs.x == 1) {
            if (cpu_dim.x == 1) dcpu.x = 0;
            else dcpu.x -= ((int) (dcpu.x / (cpu_dim.x/2)) * cpu_dim.x);
          }
          if (pbc_dirs.y == 1) {
            if (cpu_dim.y == 1) dcpu.y = 0;
            else dcpu.y -= ((int) (dcpu.y / (cpu_dim.y/2)) * cpu_dim.y);
          }

          /* Check, if atom is on my cpu */
          /* If not, copy into send buffer else move to correct cell */
          buf = NULL;
          if ((0<dcpu.x) && (cpu_dim.x>1)) {
            buf = &send_buf_west;
          }
          else if ((0>dcpu.x) && (cpu_dim.x>1)) {
            buf = &send_buf_east;
          }
          else if (0<dcpu.y) {
            buf = &send_buf_south;
          }
          else if (0>dcpu.y) {
            buf = &send_buf_north;
          }
          else { /* atom is on my cpu */
            q = PTR_VV(cell_array,lcoord,cell_dim);
            MOVE_ATOM(q, p, l);
#ifdef CLONE
            /* clones are stored contiguously after their master atom */
            if (l < p->n-nclones)
              for (clone=1; clone<nclones; clone++)
                MOVE_ATOM(q, p, l+clone);
            else /* we are dealing with the last in the stack */
              for (clone=1; clone<nclones; clone++)
                MOVE_ATOM(q, p, l);
#endif
          }
          if (buf != NULL) {
            int to_cpu = cpu_coord( coord );
            copy_one_atom( buf, to_cpu, p, l, 1);
#ifdef CLONE
            if (l < p->n-nclones)
              for (clone=1; clone<nclones; clone++)
                copy_one_atom( buf, to_cpu, p, l+clone, 1);
            else /* we are dealing with the last in the stack */
              for (clone=1; clone<nclones; clone++)
                copy_one_atom( buf, to_cpu, p, l, 1);
#endif
          }
        }
      }
    }

  /* send atoms to neighbbour CPUs */
  send_atoms();
}
#ifdef SR
/******************************************************************************
*
* send_atoms - only used for fix_cells
*
******************************************************************************/
/* Exchange migrating atoms with neighbour CPUs (blocking MPI_Sendrecv
   variant, used only by fix_cells).  East/west first; atoms destined for
   the north/south neighbours of *those* CPUs are forwarded by appending
   them to the north/south buffers before the second exchange. */
void send_atoms()
{
  MPI_Status stat;

  if (cpu_dim.x > 1) {
    /* send east, receive west, move atoms from west to cells */
    sendrecv_buf( &send_buf_east, nbeast, &recv_buf_west, nbwest, &stat);
    MPI_Get_count( &stat, REAL, &recv_buf_west.n );
    process_buffer( &recv_buf_west );

    /* send west, receive east, move atoms from east to cells */
    sendrecv_buf( &send_buf_west, nbwest, &recv_buf_east, nbeast, &stat );
    MPI_Get_count( &stat, REAL, &recv_buf_east.n );
    process_buffer( &recv_buf_east );

    if (cpu_dim.y > 1) {
      /* append atoms from east & west to north send buffer */
      copy_atoms_buf( &send_buf_north, &recv_buf_west );
      copy_atoms_buf( &send_buf_north, &recv_buf_east );
      /* check special case cpu_dim.y==2: north and south neighbour are the
         same CPU, so the atoms were already appended above */
      if (nbsouth!=nbnorth) {
        /* append atoms from east & west to south send buffer */
        copy_atoms_buf( &send_buf_south, &recv_buf_east );
        copy_atoms_buf( &send_buf_south, &recv_buf_west );
      }
    }
  }

  if (cpu_dim.y > 1) {
    /* send north, receive south, move atoms from south to cells */
    sendrecv_buf( &send_buf_north, nbnorth, &recv_buf_south, nbsouth, &stat);
    MPI_Get_count( &stat, REAL, &recv_buf_south.n );
    process_buffer( &recv_buf_south );

    /* send south, receive north, move atoms from north to cells */
    sendrecv_buf( &send_buf_south, nbsouth, &recv_buf_north, nbnorth, &stat);
    MPI_Get_count( &stat, REAL, &recv_buf_north.n );
    process_buffer( &recv_buf_north );
  }
}
#else /* not SR */
/******************************************************************************
*
* send_atoms - only used for fix_cells
*
******************************************************************************/
/* Exchange migrating atoms with neighbour CPUs (non-blocking variant, used
   only by fix_cells).  Same forwarding scheme as the SR variant: east/west
   first, then relay east/west arrivals through the north/south buffers. */
void send_atoms()
{
  MPI_Status  stateast[2], statwest[2], statnorth[2], statsouth[2];
  MPI_Request reqeast[2],  reqwest[2],  reqnorth[2],  reqsouth[2];

  if (cpu_dim.x > 1) {
    /* send east (req/stat pairs are [0]=send, [1]=recv) */
    irecv_buf( &recv_buf_west, nbwest, &reqwest[1] );
    isend_buf( &send_buf_east, nbeast, &reqwest[0] );

    /* send west */
    irecv_buf( &recv_buf_east, nbeast, &reqeast[1] );
    isend_buf( &send_buf_west, nbwest, &reqeast[0] );

    /* wait for atoms from west, move them to cells */
    MPI_Waitall(2, reqwest, statwest);
    MPI_Get_count( &statwest[1], REAL, &recv_buf_west.n );
    process_buffer( &recv_buf_west );

    /* wait for atoms from east, move them to cells */
    MPI_Waitall(2, reqeast, stateast);
    MPI_Get_count( &stateast[1], REAL, &recv_buf_east.n );
    process_buffer( &recv_buf_east );

    if (cpu_dim.y > 1) {
      /* append atoms from east & west to north send buffer */
      copy_atoms_buf( &send_buf_north, &recv_buf_west );
      copy_atoms_buf( &send_buf_north, &recv_buf_east );
      /* check special case cpu_dim.y==2: north and south neighbour are the
         same CPU, so the atoms were already appended above */
      if (nbsouth!=nbnorth) {
        /* append atoms from east & west to south send buffer */
        copy_atoms_buf( &send_buf_south, &recv_buf_east );
        copy_atoms_buf( &send_buf_south, &recv_buf_west );
      }
    }
  }

  if (cpu_dim.y > 1) {
    /* send atoms north */
    irecv_buf( &recv_buf_south, nbsouth, &reqsouth[1] );
    isend_buf( &send_buf_north, nbnorth, &reqsouth[0] );

    /* send atoms south */
    irecv_buf( &recv_buf_north, nbnorth, &reqnorth[1] );
    isend_buf( &send_buf_south, nbsouth, &reqnorth[0] );

    /* wait for atoms from south, move them to cells */
    MPI_Waitall(2, reqsouth, statsouth);
    MPI_Get_count( &statsouth[1], REAL, &recv_buf_south.n );
    process_buffer( &recv_buf_south );

    /* Wait for atoms from north, move them to cells */
    MPI_Waitall(2, reqnorth, statnorth);
    MPI_Get_count( &statnorth[1], REAL, &recv_buf_north.n );
    process_buffer( &recv_buf_north );
  }
}
#endif
|
metadirective_device_kind_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c -triple aarch64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c -triple ppc64le-unknown-linux -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void bar(void);
/* Codegen test body: each metadirective below must resolve at compile time
   to the directive whose device-kind selector matches the target triple
   (host CPU for all RUN lines); the FileCheck patterns after this function
   pin the expected outlined regions. */
void foo(void) {
#pragma omp metadirective when(device = {kind(any)} \
                               : parallel)
  bar();
#pragma omp metadirective when(device = {kind(host, cpu)} \
                               : parallel for num_threads(4))
  for (int i = 0; i < 100; i++)
    ;
#pragma omp metadirective when(device = {kind(host)} \
                               : parallel for)
  for (int i = 0; i < 100; i++)
    ;
#pragma omp metadirective when(device = {kind(nohost, gpu)} \
                               :) when(device = {kind(cpu)} \
                                       : parallel)
  bar();
#pragma omp metadirective when(device = {kind(any, cpu)} \
                               : parallel)
  bar();
#pragma omp metadirective when(device = {kind(any, host)} \
                               : parallel)
  bar();
#pragma omp metadirective when(device = {kind(gpu)} \
                               : target parallel for) default(parallel for)
  for (int i = 0; i < 100; i++)
    ;
}
// CHECK-LABEL: define {{.+}} void @foo()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_1:@.+]] to void
// CHECK-NEXT: @__kmpc_push_num_threads
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_1]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif
|
adlm_sparse.h | #ifndef __LDA_ADLMSparse
#define __LDA_ADLMSparse
#include "concurrent_matrix.h"
#include "publisher_subscriber.h"
#include "types.h"
#include "cva.h"
#include "xorshift.h"
#include "sort.h"
#include <omp.h>
#include "thread_local.h"
#include <thread>
#include "clock.h"
// TODO: make the period of sync tunable
// Asynchronous Distributed List of Matrices
class ADLMSparse {
public:
// Merge the raw update stream `msg` (quadruples of index, row, column, +/-1)
// into a globally reduced sparse delta.  Steps: encode+shuffle row ids,
// radix-sort, locally merge duplicates, Alltoall+merge slices across ranks,
// Allgather the merged slices, subtract the local contribution, and write the
// final per-row deltas into `delta` (keyed by the original row ids).
// Returns, per row, the minimum column count implied by the deltas.
static std::vector<int> ComputeDelta(int N, int R, MPI_Comm comm, int process_id, int process_size,
                                     std::vector<int> &msg, CVA<SpEntry> &delta) {
    // delta: RI: 32 bits, C: 30 bits, delta: 2 bits
    // Encode msg
    int NR = N * R;
    auto T = omp_get_max_threads();
    // random permutation of row ids balances the per-thread blocks below
    std::vector<int> ri_to_code(NR);
    std::vector<int> code_to_ri(NR);
    xorshift generator;
    std::iota(ri_to_code.begin(), ri_to_code.end(), 0);
    std::shuffle(ri_to_code.begin(), ri_to_code.end(), generator);
    for (int i = 0; i < NR; i++) code_to_ri[ri_to_code[i]] = i;
    //LOG(INFO) << "Ri to code " << ri_to_code;
    //LOG(INFO) << "Code to ri " << code_to_ri;
    //LOG(INFO) << NR;
    Clock clk;
    std::vector<long long> sorted_msg(msg.size() / 4);
    //LOG(INFO) << "Encoded " << clk.toc() << " " << msg.size() * 2 / 1024 / 1024;
#pragma omp parallel for schedule(static, 10000)
    for (size_t i = 0; i < msg.size()/4; i++) {
        auto I = msg[i * 4];
        auto r = msg[i * 4 + 1];
        auto c = msg[i * 4 + 2];
        auto delta = msg[i * 4 + 3];
        auto ri = ri_to_code[I * R + r];
        // pack (coded row | column | delta+1) so a plain integer sort groups
        // identical (row, column) pairs together
        long long data = (((long long)ri) << 32) + (c << 2) + (delta + 1);
        sorted_msg[i] = data;
    }
    //LOG(INFO) << "Encoded " << clk.toc();
    // Sort msg
    Sort::RadixSort(sorted_msg.data(), msg.size() / 4, 64);
    std::vector<int>().swap(msg);   // release msg's storage immediately
    // Decode and form delta
    CVA<SpEntry> local_delta(NR);
    int blk_size = NR / T + 1;
#define getcol(x) (((x) & 4294967295LL) >> 2)
    if (!sorted_msg.empty()) {
        // pass 1: count distinct columns per row to size local_delta
#pragma omp parallel
        {
            int tid = omp_get_thread_num();
            int blk_start = blk_size * tid;
            int blk_end = std::min(blk_size * (tid+1), NR);
            size_t msg_start = std::lower_bound(sorted_msg.begin(),
                sorted_msg.end(), ((long long)blk_start) << 32) - sorted_msg.begin();
            size_t msg_end = std::lower_bound(sorted_msg.begin(),
                sorted_msg.end(), ((long long)blk_end) << 32) - sorted_msg.begin();
            //LOG(INFO) << "blk start " << blk_start << " blk end " << blk_end
            //          << " msg start " << msg_start << " msg end " << msg_end;
            size_t ptr = msg_start;
            size_t ptr_next;
            for (int ri = blk_start; ri < blk_end; ri++, ptr = ptr_next) {
                for (ptr_next = ptr; ptr_next < msg_end
                     && (sorted_msg[ptr_next]>>32) == ri; ptr_next++);
                int num_keys = 0;
                int last_col = -1;
                for (size_t j = ptr; j < ptr_next; j++) {
                    auto c = getcol(sorted_msg[j]);
                    if (c != last_col) {
                        num_keys++;
                        last_col = c;
                    }
                }
                //LOG(INFO) << "Ri " << ri << " [" << ptr << ", " << ptr_next << "] nkeys " << num_keys;
                local_delta.SetSize(ri, num_keys);
            }
        }
    } else {
        for (int r = 0; r < NR; r++)
            local_delta.SetSize(r, 0);
    }
    local_delta.Init();
    if (!sorted_msg.empty()) {
        // pass 2: same partitioning; accumulate per-column sums into rows
#pragma omp parallel
        {
            int tid = omp_get_thread_num();
            int blk_start = blk_size * tid;
            int blk_end = std::min(blk_size * (tid+1), NR);
            size_t msg_start = std::lower_bound(sorted_msg.begin(),
                sorted_msg.end(), ((long long)blk_start) << 32) - sorted_msg.begin();
            size_t msg_end = std::lower_bound(sorted_msg.begin(),
                sorted_msg.end(), ((long long)blk_end) << 32) - sorted_msg.begin();
            size_t ptr = msg_start;
            size_t ptr_next;
            for (int ri = blk_start; ri < blk_end; ri++, ptr = ptr_next) {
                for (ptr_next = ptr; ptr_next < msg_end
                     && (sorted_msg[ptr_next]>>32) == ri; ptr_next++);
                auto row = local_delta.Get(ri);
                int num_keys = 0;
                int last_col = -1;
                int cnt = 0;
                for (size_t j = ptr; j < ptr_next; j++) {
                    auto c = getcol(sorted_msg[j]);
                    auto delta = (sorted_msg[j] & 3) - 1;   // decode back to -1/0/+1
                    if (c != last_col) {
                        if (last_col != -1)
                            row[num_keys++] = SpEntry{last_col, cnt};
                        last_col = c;
                        cnt = delta;
                    } else
                        cnt += delta;
                }
                if (last_col != -1)
                    row[num_keys++] = SpEntry{last_col, cnt};
            }
        }
    }
    decltype(sorted_msg)().swap(sorted_msg);
    //LOG(INFO) << "Local merged " << local_delta.R;
    //return local_delta;
    //for (int i = 0; i < local_delta.R; i++)
    //    LOG(INFO) << "Sz " << local_delta.Get(i).size();
    // Alltoall: each rank receives one slice of rows from every other rank
    std::vector<SpEntry> data_recv_buffer;
    std::vector<size_t> recv_offsets;
    auto cvas = local_delta.Alltoall(comm, process_size,
                                     recv_offsets, data_recv_buffer);
    //LOG(INFO) << recv_offsets;
    //LOG(INFO) << "Alltoall";
    //for (int i = 0; i < local_delta.R; i++)
    //    LOG(INFO) << "OFfset " << cvas[0].offsets[i];
    //for (int i = 0; i < local_delta.R; i++)
    //    LOG(INFO) << "Sz " << cvas[0].Get(i).size();
    CVA<SpEntry> delta_slice(cvas[0].R);
    // reusable per-thread scratch for the multiway merges below
    ThreadLocal<vector<long long>> local_thread_kv;
    ThreadLocal<vector<long long>> local_thread_temp;
    ThreadLocal<vector<size_t>> local_thread_begin;
    ThreadLocal<vector<size_t>> local_thread_end;
    // pass 1 over the received slices: count distinct keys per row
#pragma omp parallel for
    for (int r = 0; r < cvas[0].R; r++) {
        int tid = omp_get_thread_num();
        auto &kv = local_thread_kv.Get();
        auto &temp = local_thread_temp.Get();
        auto &begin = local_thread_begin.Get();
        auto &end = local_thread_end.Get();
        begin.clear();
        end.clear();
        size_t size = 0;
        for (auto &cva: cvas) size += cva.Get(r).size();
        kv.resize(size);
        temp.resize(size);
        size = 0;
        for (auto &cva: cvas) {
            auto row = cva.Get(r);
            //LOG(INFO) << row.size();
            for (int i = 0; i < row.size(); i++)
                kv[size + i] = ((long long) row[i].k << 32) + row[i].v;
            //LOG(INFO) << size;
            begin.push_back(size);
            end.push_back(size += row.size());
        }
        //LOG(INFO) << "Before MM " << begin;
        Sort::MultiwayMerge(kv.data(), temp.data(),
                            begin, end);
        //LOG(INFO) << "After MM";
        // Write back
        int Kd = 0;
        int last = -1;
        for (auto &entry: kv) {
            Kd += (entry >> 32) != last;
            last = (entry >> 32);
        }
        delta_slice.SetSize(r, Kd);
        //LOG(INFO) << "After set " << Kd;
    }
    //LOG(INFO) << "Finished";
    delta_slice.Init();
    // pass 2: redo the merge and write the summed entries
#pragma omp parallel for
    for (int r = 0; r < cvas[0].R; r++) {
        int tid = omp_get_thread_num();
        auto &kv = local_thread_kv.Get();
        auto &temp = local_thread_temp.Get();
        auto &begin = local_thread_begin.Get();
        auto &end = local_thread_end.Get();
        begin.clear();
        end.clear();
        size_t size = 0;
        for (auto &cva: cvas) size += cva.Get(r).size();
        kv.resize(size);
        temp.resize(size);
        size = 0;
        for (auto &cva: cvas) {
            auto row = cva.Get(r);
            for (int i = 0; i < row.size(); i++)
                kv[size + i] = ((long long) row[i].k << 32) + row[i].v;
            begin.push_back(size);
            end.push_back(size += row.size());
        }
        Sort::MultiwayMerge(kv.data(), temp.data(),
                            begin, end);
        // Write back, summing values that share a key
        int mask = (1LL << 32) - 1;
        auto b = delta_slice.Get(r);
        int last = -1;
        int Kd = 0;
        for (auto &entry: kv) {
            if ((entry >> 32) != last)
                b[Kd++] = SpEntry{(entry >> 32), entry & mask};
            else
                b[Kd - 1].v += (entry & mask);
            last = (entry >> 32);
        }
    }
    for (auto &cva: cvas)
        cva.Free();
    //LOG(INFO) << "Merged";
    // Allgather: every rank ends up with the fully reduced delta
    CVA<SpEntry> global_delta(NR);
    global_delta.Allgather(comm, process_size, delta_slice);
    delta_slice.Free();
    //LOG(INFO) << "Allgather";
    //return global_delta;
    // substract self and map back delta
    //CVA<SpEntry> delta(NR);
    //#pragma omp parallel for
    // pass 1: count surviving (nonzero after subtraction) keys per row
    for (int r = 0; r < NR; r++) {
        auto r1 = global_delta.Get(r);
        auto r2 = local_delta.Get(r);
        int i = 0;
        int j = 0;
        int last_k = -1;
        int last_v = 0;
        int num_ks = 0;
        while (i < r1.size() || j < r2.size()) {
            // Pick up the next entry in key order (local entries negated)
            SpEntry entry;
            if (i < r1.size() && (j == r2.size() || r1[i].k < r2[j].k)) {
                entry = r1[i++];
            } else {
                entry = r2[j++];
                entry.v = -entry.v;
            }
            //LOG_IF(INFO, process_id == 0)
            //    << r << " " << entry.k << " " << entry.v;
            if (entry.k != last_k) {
                if (last_k != -1 && last_v != 0) ++num_ks;
                last_k = entry.k;
                last_v = entry.v;
            } else
                last_v += entry.v;
        }
        if (last_k != -1 && last_v != 0) ++num_ks;
        delta.SetSize(code_to_ri[r], num_ks);
    }
    delta.Init();
    std::vector<int> sizes(NR);
    // pass 2: write the surviving entries under the original row ids
#pragma omp parallel for
    for (int r = 0; r < NR; r++) {
        auto r1 = global_delta.Get(r);
        auto r2 = local_delta.Get(r);
        auto ro = delta.Get(code_to_ri[r]);
        int i = 0;
        int j = 0;
        int last_k = -1;
        int last_v = 0;
        int num_ks = 0;
        while (i < r1.size() || j < r2.size()) {
            // Pick up the next
            SpEntry entry;
            if (i < r1.size() && (j == r2.size() || r1[i].k < r2[j].k)) {
                entry = r1[i++];
            } else {
                entry = r2[j++];
                entry.v = -entry.v;
            }
            if (entry.k != last_k) {
                if (last_k != -1 && last_v != 0)
                    ro[num_ks++] = SpEntry{last_k, last_v};
                last_k = entry.k;
                last_v = entry.v;
            } else
                last_v += entry.v;
        }
        if (last_k != -1 && last_v != 0)
            ro[num_ks++] = SpEntry{last_k, last_v};
        // column count implied by the largest key seen in either row
        auto &sz = sizes[code_to_ri[r]];
        if (r1.size()) sz = std::max(sz, (int)r1[r1.size()-1].k + 1);
        if (r2.size()) sz = std::max(sz, (int)r2[r2.size()-1].k + 1);
    }
    return sizes;
}
public:
// Construct N concurrent matrices of R rows each and start the background
// sync thread, which periodically reduces the queued updates across all MPI
// ranks and applies the resulting deltas to the local matrices.
ADLMSparse(int num_data, int num_rows, int max_num_threads, int base_column_shift = 7) :
        send_buffer(max_num_threads), N(num_data), R(num_rows) {
    for (int i = 0; i < N; i++)
        data.emplace_back(num_rows, base_column_shift);
    // private communicator so the sync thread's collectives cannot collide
    // with the application's use of MPI_COMM_WORLD
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    stop = 0;
    barrier = 0;
    barrier_met = 0;
    num_syncs = 0;
    MPI_Comm_rank(comm, &process_id);
    MPI_Comm_size(comm, &process_size);
    // NOTE(review): stop/barrier are plain ints read here and written by
    // other threads without atomics — presumably benign on the targeted
    // platforms, but formally a data race; consider std::atomic<int>.
    sync_thread = std::move(std::thread([&]()
    {
        while (1) {
            int global_barrier;
            int global_stop;
            MPI_Allreduce(&barrier, &global_barrier, 1,
                          MPI_INT, MPI_SUM, comm);
            MPI_Allreduce(&stop, &global_stop, 1,
                          MPI_INT, MPI_SUM, comm);
            {
                // grab the queued updates; producers keep filling to_send
                std::lock_guard<std::mutex> lock(mutex_);
                to_send.swap(sending);
            }
            //LOG(INFO) << sending.size() << " " << N << " " << R;
            //LOG(INFO) << "Sync " << Capacity() * 4 / 1048576 << " " << sending.size();
            CVA<SpEntry> delta(N * R);
            size_t num_updated = sending.size();
            auto sending_bak = sending;   // NOTE(review): copy appears unused
            auto sizes = ComputeDelta(N, R, comm, process_id, process_size, sending, delta);
            sending.clear();
            ++num_syncs;
            // apply the reduced delta to every matrix row
            for (int n = 0; n < N; n++)
                for (int r = 0; r < R; r++) {
                    data[n].Grow(sizes[n * R + r]);
                    auto row = delta.Get(n * R + r);
                    for (auto &entry: row)
                        data[n].Inc(r, entry.k, entry.v);
                }
            size_t global_num_updated;
            MPI_Allreduce(&num_updated, &global_num_updated, 1,
                          MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm);
            // a barrier completes only when all ranks requested it AND the
            // last sync round moved no data anywhere
            if (global_barrier == process_size && global_num_updated == 0) {
                barrier_met = 1;
                cv.notify_all();
            }
            if (global_stop == process_size)
                break;
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }));
}
// Drain all outstanding updates, then signal the sync thread to exit and
// wait for it; every rank must destroy the object for the stop to complete.
~ADLMSparse() {
    Barrier();
    //LOG(INFO) << "Barrier";
    stop = 1;
    sync_thread.join();
    //LOG(INFO) << "Join";
}
// Note: concurrent call must have different thread_id
// Grow matrix `index` to at least new_num_columns columns, and queue a
// zero-valued update touching the last new column so peers grow too.
// Note: concurrent callers must use distinct thread_id values.
void Grow(int thread_id, size_t index, size_t new_num_columns) {
    data[index].Grow(new_num_columns);
    auto &buffer = send_buffer[thread_id];
    buffer.push_back(index);
    buffer.push_back(0);                    // row 0 (any row would do)
    buffer.push_back(new_num_columns - 1);  // highest column forces growth
    buffer.push_back(0);                    // delta of 0: no value change
}
// Note: concurrent call must have different thread_id
// Increment cell (r, c) of matrix `index` locally and queue a +1 update
// for the next sync.  Concurrent callers must use distinct thread_id values.
void Inc(int thread_id, size_t index, size_t r, size_t c) {
    data[index].Inc(r, c);
    auto &buffer = send_buffer[thread_id];
    buffer.push_back(index);
    buffer.push_back(r);
    buffer.push_back(c);
    buffer.push_back(1);
}
// Note: concurrent call must have different thread_id
// Decrement cell (r, c) of matrix `index` locally and queue a -1 update
// for the next sync.  Concurrent callers must use distinct thread_id values.
void Dec(int thread_id, size_t index, size_t r, size_t c) {
    data[index].Dec(r, c);
    auto &buffer = send_buffer[thread_id];
    buffer.push_back(index);
    buffer.push_back(r);
    buffer.push_back(c);
    buffer.push_back(-1);
}
// Note: concurrent call must have different thread_id
// Flush this thread's private update buffer into the shared to_send queue,
// making the updates visible to the sync thread.  Concurrent callers must
// use distinct thread_id values.
void Publish(int thread_id) {
    auto &buffer = send_buffer[thread_id];
    if (!buffer.empty()) {
        std::lock_guard<std::mutex> lock(mutex_);
        to_send.insert(to_send.end(), buffer.begin(), buffer.end());
        buffer.clear();
    }
}
// Accessors delegating to the underlying ConcurrentMatrix; these touch only
// the local replica and bypass the update queue (Set/SetSum are not
// propagated to other ranks).
const ConcurrentMatrix<TCount>& GetMatrix(size_t index) {
    return data[index];
}

TCount Get(size_t index, size_t r, size_t c) {
    return data[index].Get(r, c);
}

void Set(size_t index, size_t r, size_t c, TCount value) {
    data[index].Set(r, c, value);
}

TCount GetSum(size_t index, size_t c) {
    return data[index].GetSum(c);
}

void SetSum(size_t index, size_t c, TCount value) {
    data[index].SetSum(c, value);
}

size_t GetC(size_t index) {
    return data[index].GetC();
}
// Block until every rank has requested a barrier and a full sync round has
// run with no pending updates anywhere, then reset the barrier state.
// Must be called collectively (all ranks), or the sync threads deadlock.
void Barrier() {
    std::unique_lock<std::mutex> lock(mutex_);
    barrier = 1;
    cv.wait(lock, [&](){ return barrier_met; });
    barrier = 0;
    barrier_met = 0;
    num_syncs = 0;
}
// Globally drain pending updates, then compact every matrix's storage.
// Collective: relies on Barrier(), so all ranks must call it.
void Compress() {
    Barrier();
    for (auto &m: data)
        m.Compress();
}
// Number of sync rounds completed since the last Barrier().
int GetNumSyncs() {
    return num_syncs;
}

// Communication accounting is not tracked in this implementation.
size_t GetBytesCommunicated() {
    return 0;
}
// Total element capacity held by this object: all matrices, every per-thread
// send buffer, and the two shared update queues.  Used for memory reporting.
size_t Capacity() {
    size_t cap = 0;
    for (auto &d: data)
        cap += d.Capacity();
    // BUG FIX: the loop previously added send_buffer.capacity() (the count
    // of per-thread buffer slots) once per buffer instead of each buffer's
    // own element capacity.
    for (auto &buff: send_buffer)
        cap += buff.capacity();
    cap += to_send.capacity() + sending.capacity();
    return cap;
}
private:
int N, R;
std::vector<ConcurrentMatrix<TCount>> data;
std::vector<std::vector<int>> send_buffer;
std::vector<int> to_send, sending;
std::thread sync_thread;
std::mutex mutex_;
std::condition_variable cv;
int stop, barrier, barrier_met, num_syncs;
MPI_Comm comm;
int process_id, process_size;
};
#endif
|
index.h | #ifndef GBWTGRAPH_CONSTRUCTION_H
#define GBWTGRAPH_CONSTRUCTION_H
#include <cstdlib>
#include <functional>
#include <omp.h>
#include <gbwtgraph/gbwtgraph.h>
#include <gbwtgraph/minimizer.h>
/*
index.h: Minimizer index construction from GBWTGraph.
*/
namespace gbwtgraph
{
//------------------------------------------------------------------------------
/*
Index the haplotypes in the graph. Insert the minimizers into the provided index.
Function argument get_payload is used to generate the payload for each position
stored in the index.
The number of threads can be set through OMP.
*/
/*
  Index the haplotypes in the graph: find minimizers (or syncmers) in every
  haplotype window and insert them, with payloads from get_payload, into the
  provided index.  Thread count is taken from OMP; inserts are serialized
  through an OMP critical section, so threads batch positions in per-thread
  caches first.
*/
template<class KeyType>
void
index_haplotypes(const GBWTGraph& graph, MinimizerIndex<KeyType>& index,
                 const std::function<payload_type(const pos_t&)>& get_payload)
{
  typedef typename MinimizerIndex<KeyType>::minimizer_type minimizer_type;

  int threads = omp_get_max_threads();

  // Minimizer caching. We only generate the payloads after we have removed duplicate positions.
  std::vector<std::vector<std::pair<minimizer_type, pos_t>>> cache(threads);
  constexpr size_t MINIMIZER_CACHE_SIZE = 1024;
  auto flush_cache = [&](int thread_id)
  {
    std::vector<std::pair<minimizer_type, pos_t>>& current_cache = cache[thread_id];
    gbwt::removeDuplicates(current_cache, false);
    // compute payloads outside the critical section to minimize contention
    std::vector<payload_type> payload;
    payload.reserve(current_cache.size());
    for(size_t i = 0; i < current_cache.size(); i++) { payload.push_back(get_payload(current_cache[i].second)); }
    #pragma omp critical (minimizer_index)
    {
      for(size_t i = 0; i < current_cache.size(); i++)
      {
        index.insert(current_cache[i].first, current_cache[i].second, payload[i]);
      }
    }
    cache[thread_id].clear();
  };

  // Minimizer finding: map each minimizer's offset in the window sequence back
  // to a (node id, orientation, offset) position on the graph.
  auto find_minimizers = [&](const std::vector<handle_t>& traversal, const std::string& seq)
  {
    std::vector<minimizer_type> minimizers = index.minimizers(seq); // Calls syncmers() when appropriate.
    auto iter = traversal.begin();
    size_t node_start = 0;
    int thread_id = omp_get_thread_num();
    for(minimizer_type& minimizer : minimizers)
    {
      if(minimizer.empty()) { continue; }
      // Find the node covering minimizer starting position.
      size_t node_length = graph.get_length(*iter);
      while(node_start + node_length <= minimizer.offset)
      {
        node_start += node_length;
        ++iter;
        node_length = graph.get_length(*iter);
      }
      pos_t pos { graph.get_id(*iter), graph.get_is_reverse(*iter), minimizer.offset - node_start };
      if(minimizer.is_reverse) { pos = reverse_base_pos(pos, node_length); }
      if(!Position::valid_offset(pos))
      {
        #pragma omp critical (cerr)
        {
          std::cerr << "index_haplotypes(): Node offset " << offset(pos) << " is too large" << std::endl;
        }
        std::exit(EXIT_FAILURE);
      }
      cache[thread_id].emplace_back(minimizer, pos);
    }
    if(cache[thread_id].size() >= MINIMIZER_CACHE_SIZE) { flush_cache(thread_id); }
  };

  /*
    Index the minimizers.
    We do a lot of redundant work by traversing both orientations and finding almost the same minimizers
    in each orientation. If we consider only the windows starting in forward (reverse) orientation,
    we may skip windows that cross from a reverse node to a forward node (from a forward node to a
    reverse node).
  */
  for_each_haplotype_window(graph, index.window_bp(), find_minimizers, (threads > 1));
  // flush whatever remains in every thread's cache
  for(int thread_id = 0; thread_id < threads; thread_id++) { flush_cache(thread_id); }
}
//------------------------------------------------------------------------------
} // namespace gbwtgraph
#endif // GBWTGRAPH_CONSTRUCTION_H
|
ParallelVertexFilter.h | /**
* @file
* This file is part of PUMGen
*
* For conditions of distribution and use, please see the copyright
* notice in the file 'COPYING' at the root directory of this package
* and the copyright notice at https://github.com/SeisSol/PUMGen
*
* @copyright 2017 Technical University of Munich
* @author Sebastian Rettenberger <sebastian.rettenberger@tum.de>
*
* @remark This class is taken from XdmfWriter (https://github.com/TUM-I5/XdmfWriter)
*/
#ifndef PARALLEL_VERTEX_FILTER_H
#define PARALLEL_VERTEX_FILTER_H
#include <mpi.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <cstdint>
#include <vector>
#include "utils/logger.h"
/**
* Filters duplicate vertices in parallel
*/
class ParallelVertexFilter
{
private:
/**
 * Orders 3D-vertex indices lexicographically by the (x, y, z)
 * coordinates they refer to.
 */
class IndexedVertexComparator
{
private:
	const double *m_vertices;

public:
	IndexedVertexComparator(const double *vertices)
		: m_vertices(vertices)
	{
	}

	bool operator() (unsigned int i, unsigned int j)
	{
		const double *a = &m_vertices[i * 3];
		const double *b = &m_vertices[j * 3];

		// Lexicographic comparison: first differing coordinate decides.
		if (a[0] != b[0])
			return a[0] < b[0];
		if (a[1] != b[1])
			return a[1] < b[1];
		return a[2] < b[2];
	}
};
private:
/** The communicator we use */
MPI_Comm m_comm;
/** Our rank */
int m_rank;
/** #Processes */
int m_numProcs;
/** Global id after filtering */
unsigned long *m_globalIds;
/** Number of local vertices after filtering */
unsigned int m_numLocalVertices;
/** Local vertices after filtering */
double *m_localVertices;
public:
/**
 * @param comm The MPI communicator used for all collective operations
 */
ParallelVertexFilter(MPI_Comm comm = MPI_COMM_WORLD)
	: m_comm(comm), m_globalIds(0L), m_numLocalVertices(0), m_localVertices(0L)
{
	MPI_Comm_rank(comm, &m_rank);
	MPI_Comm_size(comm, &m_numProcs);

	// Lazily create the static 3-double MPI datatype on first construction.
	// NOTE(review): not safe if several filters are constructed from
	// different threads at once -- confirm single-threaded construction.
	if (vertexType == MPI_DATATYPE_NULL) {
		MPI_Type_contiguous(3, MPI_DOUBLE, &vertexType);
		MPI_Type_commit(&vertexType);
	}
}
virtual ~ParallelVertexFilter()
{
	// delete[] on a null pointer is a no-op, so this is safe even if
	// filter() was never called.
	delete [] m_globalIds;
	delete [] m_localVertices;
}
/**
 * Filters duplicate vertices across all ranks using a parallel sample sort:
 * vertices are bucketed by their (rounded) x coordinate, redistributed with
 * all-to-all communication, deduplicated on the receiving rank, and the
 * resulting global ids are sent back.
 *
 * Afterwards globalIds() maps each input vertex to an id shared by all of
 * its duplicates, and localVertices()/numLocalVertices() describe the
 * unique vertices this rank owns.
 *
 * @param vertices Vertices that should be filtered, must have the size <code>numVertices * 3</code>
 */
void filter(unsigned int numVertices, const double *vertices)
{
	// Chop the last 4 bits to avoid numerical errors
	double *roundVertices = new double[numVertices*3];
	removeRoundError(vertices, numVertices*3, roundVertices);

	// Create indices and sort them locally
	unsigned int *sortIndices = new unsigned int[numVertices];
	createSortedIndices(roundVertices, numVertices, sortIndices);

	// Select BUCKETS_PER_RANK-1 splitter elements
	double localSplitters[BUCKETS_PER_RANK-1];
#if 0 // Use omp only if we create a larger amount of buckets
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
#endif
	for (int i = 0; i < BUCKETS_PER_RANK-1; i++) {
		unsigned long vrtxIndex = static_cast<unsigned long>(i)
			* static_cast<unsigned long>(numVertices)
			/ static_cast<unsigned long>(BUCKETS_PER_RANK-1);
		assert(vrtxIndex < numVertices);

		localSplitters[i] = roundVertices[sortIndices[vrtxIndex]*3];
	}

	// Collect all splitter elements on rank 0
	double *allSplitters = 0L;

	if (m_rank == 0)
		allSplitters = new double[m_numProcs * (BUCKETS_PER_RANK-1)];

	MPI_Gather(localSplitters, BUCKETS_PER_RANK-1, MPI_DOUBLE,
		allSplitters, BUCKETS_PER_RANK-1, MPI_DOUBLE,
		0, m_comm);

	// Sort splitter elements
	if (m_rank == 0)
		std::sort(allSplitters, allSplitters + (m_numProcs * (BUCKETS_PER_RANK-1)));

	// Distribute splitter to all processes
	double *splitters = new double[m_numProcs-1];

	if (m_rank == 0) {
#ifdef _OPENMP
		#pragma omp parallel for schedule(static)
#endif
		for (int i = 0; i < m_numProcs-1; i++) {
			unsigned long spltIndex = (i+1) * (BUCKETS_PER_RANK-1);
			assert(spltIndex < static_cast<unsigned int>(m_numProcs * (BUCKETS_PER_RANK-1)));

			splitters[i] = allSplitters[spltIndex];
		}
	}

	MPI_Bcast(splitters, m_numProcs-1, MPI_DOUBLE, 0, m_comm);

	delete [] allSplitters;

	// Determine the bucket for each vertex (rank that will own it)
	unsigned int *bucket = new unsigned int[numVertices];
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
	for (unsigned int i = 0; i < numVertices; i++) {
		double* ub = std::upper_bound(splitters, splitters+m_numProcs-1, roundVertices[i*3]);

		bucket[i] = ub-splitters;
	}

	delete [] roundVertices;
	delete [] splitters;

	// Determine the (local and total) bucket size
	int *bucketSize = new int[m_numProcs];
	memset(bucketSize, 0, sizeof(int)*m_numProcs);
	for (unsigned int i = 0; i < numVertices; i++)
		bucketSize[bucket[i]]++;

	delete [] bucket;

	// Tell all processes what we are going to send them
	int *recvSize = new int[m_numProcs];
	MPI_Alltoall(bucketSize, 1, MPI_INT, recvSize, 1, MPI_INT, m_comm);

	unsigned int numSortVertices = 0;
#ifdef _OPENMP
	#pragma omp parallel for schedule(static) reduction(+: numSortVertices)
#endif
	for (int i = 0; i < m_numProcs; i++)
		numSortVertices += recvSize[i];

	// Create sorted send buffer (sorted order makes the buckets contiguous)
	double *sendVertices = new double[3 * numVertices];
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
	for (unsigned int i = 0; i < numVertices; i++) {
		memcpy(&sendVertices[i*3], &vertices[sortIndices[i]*3], sizeof(double)*3);
	}

	// Allocate buffer for the vertices and exchange them
	double *sortVertices = new double[3 * numSortVertices];

	int *sDispls = new int[m_numProcs];
	int *rDispls = new int[m_numProcs];
	sDispls[0] = 0;
	rDispls[0] = 0;
	for (int i = 1; i < m_numProcs; i++) {
		sDispls[i] = sDispls[i-1] + bucketSize[i-1];
		rDispls[i] = rDispls[i-1] + recvSize[i-1];
	}
	MPI_Alltoallv(sendVertices, bucketSize, sDispls, vertexType, sortVertices, recvSize, rDispls, vertexType, m_comm);

	delete [] sendVertices;

	// Chop the last 4 bits to avoid numerical errors
	roundVertices = new double[numSortVertices*3];
	removeRoundError(sortVertices, numSortVertices*3, roundVertices);

	// Create indices and sort them (such that the vertices are sorted)
	unsigned int *sortSortIndices = new unsigned int[numSortVertices];
	createSortedIndices(roundVertices, numSortVertices, sortSortIndices);

	delete [] roundVertices;

	// Initialize the global ids we send back to the other processors:
	// equal neighbors in sorted order share an id.
	unsigned long *gids = new unsigned long[numSortVertices];
	if (numSortVertices > 0) {
		gids[sortSortIndices[0]] = 0;
		for (unsigned int i = 1; i < numSortVertices; i++) {
			if (equals(&sortVertices[sortSortIndices[i-1]*3], &sortVertices[sortSortIndices[i]*3]))
				gids[sortSortIndices[i]] = gids[sortSortIndices[i-1]];
			else
				gids[sortSortIndices[i]] = gids[sortSortIndices[i-1]] + 1;
		}
	}

	// Create the local vertices list
	if (numSortVertices > 0)
		m_numLocalVertices = gids[sortSortIndices[numSortVertices-1]] + 1;
	else
		m_numLocalVertices = 0;

	// Fix: sortSortIndices was leaked in the original implementation;
	// this is its last use, release it here.
	delete [] sortSortIndices;

	delete [] m_localVertices;
	m_localVertices = new double[m_numLocalVertices * 3];
	for (unsigned int i = 0; i < numSortVertices; i++)
		memcpy(&m_localVertices[gids[i]*3], &sortVertices[i*3], sizeof(double)*3);

	delete [] sortVertices;

	// Get the vertices offset (prefix sum over the ranks)
	unsigned int offset = m_numLocalVertices;
	MPI_Scan(MPI_IN_PLACE, &offset, 1, MPI_UNSIGNED, MPI_SUM, m_comm);
	offset -= m_numLocalVertices;

	// Add offset to the global ids
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
	for (unsigned int i = 0; i < numSortVertices; i++)
		gids[i] += offset;

	// Send result back
	unsigned long *globalIds = new unsigned long[numVertices];
	MPI_Alltoallv(gids, recvSize, rDispls, MPI_UNSIGNED_LONG,
		globalIds, bucketSize, sDispls, MPI_UNSIGNED_LONG, m_comm);

	delete [] bucketSize;
	delete [] recvSize;
	delete [] sDispls;
	delete [] rDispls;
	delete [] gids;

	// Assign the global ids to the correct vertices
	delete [] m_globalIds;
	m_globalIds = new unsigned long[numVertices];
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
	for (unsigned int i = 0; i < numVertices; i++)
		m_globalIds[sortIndices[i]] = globalIds[i];

	delete [] sortIndices;
	delete [] globalIds;
}
/**
 * @return The list of the global identifiers after filtering
 *         (null until filter() has been called)
 */
const unsigned long* globalIds() const
{
	return m_globalIds;
}

/**
 * @return Number of vertices this process is responsible for after filtering
 *         (0 until filter() has been called)
 */
unsigned int numLocalVertices() const
{
	return m_numLocalVertices;
}

/**
 * @return The list of vertices this process is responsible for after filtering
 *         (null until filter() has been called)
 */
const double* localVertices() const
{
	return m_localVertices;
}
private:
/**
 * Removes round errors of double values by setting the last 4 bits
 * (of the significand) to zero.
 *
 * @warning Only works if <code>value</code> is not nan or infinity
 * @todo This should work for arbitrary precision
 */
static double removeRoundError(double value)
{
	// Use memcpy for the bit-level access: type punning through a union
	// is undefined behavior in C++ (unlike C); memcpy is always well
	// defined and compiles to the same code.
	static const uint64_t mask = ~static_cast<uint64_t>(0xF);

	uint64_t bits;
	memcpy(&bits, &value, sizeof(bits));
	bits &= mask;
	memcpy(&value, &bits, sizeof(value));

	return value;
}

/**
 * Removes the round errors using {@link removeRoundError(double)}
 *
 * @param values The list of floating point values
 * @param count Number of values
 * @param[out] roundValues The list of rounded values
 *  (the caller is responsible for allocating the memory)
 */
static void removeRoundError(const double *values, unsigned int count, double* roundValues)
{
#ifdef _OPENMP
	#pragma omp parallel for schedule(static)
#endif
	for (unsigned int i = 0; i < count; i++)
		roundValues[i] = removeRoundError(values[i]);
}
/**
* Creates the list of sorted indices for the vertices.
* The caller is responsible for allocating the memory.
*/
static void createSortedIndices(const double *vertices, unsigned int numVertices,
unsigned int *sortedIndices)
{
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numVertices; i++)
sortedIndices[i] = i;
IndexedVertexComparator comparator(vertices);
std::sort(sortedIndices, sortedIndices+numVertices, comparator);
}
/**
 * Compares two vertices for equality.
 * Assumes that the rounding errors are removed.
 */
static bool equals(const double* vertexA, const double* vertexB)
{
	// Component-wise comparison of the three coordinates.
	for (int c = 0; c < 3; c++) {
		if (vertexA[c] != vertexB[c])
			return false;
	}
	return true;
}
/** MPI data type consisting of three doubles */
static MPI_Datatype vertexType;
/** The total buckets we create is <code>BUCKETS_PER_RANK * numProcs</code> */
const static int BUCKETS_PER_RANK = 8;
};
#endif // PARALLEL_VERTEX_FILTER_H
|
mexutil.h | ///////////////////////////////////////////////////////////////////////////////
//
// Name: mexutil.h
// Purpose: Macros and helper functions for creating MATLAB MEX-files.
// Author: Daeyun Shin <daeyun@dshin.org>
// Created: 01.15.2015
// Modified: 02.01.2015
// Version: 0.1
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
///////////////////////////////////////////////////////////////////////////////
#pragma once
#include "mex.h"
#include <string>
#include <algorithm>
#include <cctype>
#include <vector>
#include <sstream>
#include <iostream>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef N_LHS_VAR
#define N_LHS_VAR nargout
#endif
#ifndef N_RHS_VAR
#define N_RHS_VAR nargin
#endif
#ifndef MEX_COMPONENT_NAME
#define MEX_COMPONENT_NAME "MATLAB"
#endif
#define BOLD(str) "<strong>" str "</strong>"
#define ORANGE(str) "[\b" str "]\b"
namespace mexutil {
enum CompOp { EQ, GT, LT, NEQ, GE, LE };
enum ArgType {
kDouble,
kSingle,
kStruct,
kLogical,
kChar,
kInt8,
kUint8,
kInt16,
kUint16,
kInt32,
kUint32
};
// Redirect stderr to a file or stringstream .
void CaptureErrorMsg(std::stringstream &stderr_content);
void CaptureErrorMsg(const std::string &filename);
// e.g. double* mat = GetArg<kDouble,EQ,GT>(0, prhs, 3, 3); Throws an error if
// prhs[0] doesn't have exactly 3 rows or have less than 3 columns. 0s are
// ignored.
template <ArgType argtype, CompOp row_comp = EQ, CompOp col_comp = EQ>
void *GetArg(const mwSize index, const mxArray *input[], mwSize nrows = 0,
mwSize ncols = 0);
// Constructs the identifier token used in error messages.
std::string MatlabIdStringFromFilename(std::string str);
std::string FilenameFromPath(std::string str);
// Retrieves the workspace global variable mexVerboseLevel (default: 1).
int VerboseLevel();
const int kDefaultVerboseLevel = 1;
const std::string kFilename = FilenameFromPath(__FILE__);
const std::string kFunctionIdentifier = MatlabIdStringFromFilename(kFilename);
const int kVerboseLevel = VerboseLevel();
// Force pass-by-value behavior to prevent accidentally modifying shared
// memory content in-place. Undocumented.
// http://undocumentedmatlab.com/blog/matlab-mex-in-place-editing
extern "C" bool mxUnshareArray(mxArray *array_ptr, bool noDeepCopy);
// Copy and transpose.
template <size_t nrows_in, typename T>
void Transpose(const std::vector<T> &in, T *out);
// Useful when zero-based indexing is used.
template <size_t nrows_in, typename T>
void TransposeAddOne(const std::vector<T> &in, T *out);
// Returns a non-const alias of prhs[index] whose data is not shared with
// other MATLAB variables, so it can safely be modified in place.
// Relies on the undocumented mxUnshareArray API (see the extern "C"
// declaration above).
mxArray *UnshareArray(int index, const mxArray *prhs[]) {
  mxArray *unshared = const_cast<mxArray *>(prhs[index]);
  mxUnshareArray(unshared, true);
  return unshared;
}
// Constructs a valid MATLAB identifier token from a file name: strips the
// extension, prefixes "mex_" when the name does not start with a letter, and
// replaces every remaining invalid character with '_'.
std::string MatlabIdStringFromFilename(std::string str) {
  (void)(MatlabIdStringFromFilename);  // suppress unused-function warnings
  auto is_invalid_id_char =
      [](char ch) { return !(isalnum((unsigned char)ch) || ch == '_'); };
  // find_first_of returns npos (not 0/-1) when there is no dot; the old
  // "if (int i = ...)" test relied on a lossy size_t -> int conversion.
  const std::string::size_type dot = str.find_first_of('.');
  if (dot != std::string::npos) str = str.substr(0, dot);
  // Cast through unsigned char: passing a negative char to isalpha is UB.
  if (str.empty() || !isalpha((unsigned char)str[0])) str = "mex_" + str;
  std::replace_if(str.begin(), str.end(), is_invalid_id_char, '_');
  return str;
}
// Returns the file-name component of a '/'-separated path (the input itself
// when it contains no slash).
std::string FilenameFromPath(std::string str) {
  (void)(FilenameFromPath);  // suppress unused-function warnings
  // find_last_of returns npos when no slash exists; the old
  // "if (int i = ...)" test relied on a lossy conversion and also failed
  // to strip a slash at position 0 (e.g. "/file.c").
  const std::string::size_type slash = str.find_last_of('/');
  if (slash != std::string::npos) str = str.substr(slash + 1);
  return str;
}
// Retrieves the workspace global variable mexVerboseLevel, falling back to
// kDefaultVerboseLevel when the global does not exist.
// NOTE(review): mxGetScalar returns double; the value is truncated to int.
int VerboseLevel() {
  (void)(VerboseLevel);
  mxArray *ptr = mexGetVariable("global", "mexVerboseLevel");
  if (ptr == NULL) return kDefaultVerboseLevel;
  return mxGetScalar(ptr);
}
// Redirects std::cerr into the given stringstream for the remainder of the
// process lifetime (the previous stream buffer is not restored).
void CaptureErrorMsg(std::stringstream &stderr_content) {
  std::cerr.rdbuf(stderr_content.rdbuf());
}

// Appends the C-level stderr stream to the given file.
// NOTE(review): the freopen return value is unchecked; on failure stderr is
// left closed -- consider verifying the result.
void CaptureErrorMsg(const std::string &filename) {
  freopen(filename.c_str(), "a", stderr);
}
// Copies the column-major matrix `in` (nrows_in rows) into `out`,
// transposed. `out` must hold in.size() elements.
template <size_t nrows_in, typename T>
void Transpose(const std::vector<T> &in, T *out) {
  const size_t ncols_in = in.size() / nrows_in;
#pragma omp parallel for
  for (size_t col = 0; col < ncols_in; ++col) {
    const T *src = &in[col * nrows_in];
    for (size_t row = 0; row < nrows_in; ++row) {
      out[col + ncols_in * row] = src[row];
    }
  }
}
// Same as Transpose, but adds one to every element while copying.
// Useful when zero-based indices must become one-based MATLAB indices.
template <size_t nrows_in, typename T>
void TransposeAddOne(const std::vector<T> &in, T *out) {
  const size_t ncols_in = in.size() / nrows_in;
#pragma omp parallel for
  for (size_t col = 0; col < ncols_in; ++col) {
    const T *src = &in[col * nrows_in];
    for (size_t row = 0; row < nrows_in; ++row) {
      out[col + ncols_in * row] = src[row] + 1;
    }
  }
}
// e.g. LEVEL(2, MPRINTF("Not printed if logging level is less than 2."))
#define LEVEL(verbose_level, expr) \
  { \
    if (kVerboseLevel >= verbose_level) expr; \
  }

// Construct an identifier string e.g. MATLAB:mexutil:myErrorIdentifier
#define MEX_IDENTIFIER(mnemonic) \
  (std::string(MEX_COMPONENT_NAME ":") + kFunctionIdentifier + \
   std::string(":" mnemonic)).c_str()

// Assert number of input variables.
#define N_IN_RANGE(min, max) \
  { \
    if (N_RHS_VAR < min || N_RHS_VAR > max) { \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("InputSizeError"), \
                        "Number of inputs must be between %d and %d.", min, \
                        max); \
    } \
  }

// Assert number of output variables.
#define N_OUT_RANGE(min, max) \
  { \
    if (N_LHS_VAR < min || N_LHS_VAR > max) { \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("OutputSizeError"), \
                        "Number of outputs must be between %d and %d.", min, \
                        max); \
    } \
  }

// Assert an exact number of input variables.
#define N_IN(num) \
  { \
    if (N_RHS_VAR != num) { \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("InputSizeError"), \
                        "Number of inputs must be %d.", num); \
    } \
  }

// Assert an exact number of output variables.
#define N_OUT(num) \
  { \
    if (N_LHS_VAR != num) { \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("OutputSizeError"), \
                        "Number of outputs must be %d.", num); \
    } \
  }

// Dump a variable's name, value, address and size via DisplayVariable.
#define VAR(name) \
  { \
    std::ostringstream val_str; \
    val_str << name; \
    DisplayVariable(#name, val_str.str(), sizeof(name), (void *)&name, \
                    __FILE__, __LINE__, __func__); \
  }

// Print message to MATLAB console.
// e.g.MPRINTF(BOLD("%d"), argc);
#define MPRINTF(...) \
  { \
    mexPrintf(__VA_ARGS__); \
    mexEvalString("drawnow;"); \
  }

// Display error and exit.
#define ERR_EXIT(errname, ...) \
  { mexErrMsgIdAndTxt(MEX_IDENTIFIER(errname), ##__VA_ARGS__); }
// Macros starting with an underscore are internal.
// Fix: the original expanded to `mexErrMsgTxt("...", );` -- a trailing empty
// argument that does not compile, and mexErrMsgTxt only accepts a single
// string anyway.
#define _ASSERT(condition) \
  { \
    if (!(condition)) { \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgTxt("assertion " #condition " failed\n"); \
    } \
  }

// Fix: mexErrMsgTxt takes exactly one const char* -- a format string plus
// `msg` cannot be passed to it. Use the printf-style mexErrMsgIdAndTxt
// (consistent with ASSERT_FMT below) instead.
#define _ASSERT_MSG(condition, msg) \
  { \
    if (!(condition)) { \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("AssertionError"), \
                        "assertion " #condition " failed\n%s\n", msg); \
    } \
  }
// Selects its third argument; used to dispatch ASSERT on argument count.
#define _CHOOSE_MACRO(a, x, func, ...) func

// ASSERT(cond) -> _ASSERT(cond); ASSERT(cond, msg) -> _ASSERT_MSG(cond, msg).
// Fix: the original forwarded only __VA_ARGS__, dropping `condition`
// entirely, so neither arity ever received the condition to test.
// ##__VA_ARGS__ (comma deletion) is a GNU/Clang extension.
#define ASSERT(condition, ...) \
  _CHOOSE_MACRO(, ##__VA_ARGS__, _ASSERT_MSG(condition, __VA_ARGS__), \
                _ASSERT(condition))

// Assertion with a printf-style message.
#define ASSERT_FMT(condition, fmt, ...) \
  { \
    if (!(condition)) { \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("AssertionError"), \
                        "assertion " #condition " failed\n" fmt "\n", \
                        ##__VA_ARGS__); \
    } \
  }
// Validates prhs[index] against the requested dimensions and element type and
// returns its raw data pointer. Dimension checks of 0 are skipped. On any
// failed check, ASSERT_FMT/ERR_EXIT raise a MATLAB error and do not return.
// NOTE(review): the "%d" format for `index`/`nrows`/`ncols` assumes mwSize
// fits in int -- confirm for large-array (mwSize == size_t) builds.
template <ArgType argtype, CompOp row_comp, CompOp col_comp>
void *GetArg(mwSize index, const mxArray *input[], mwSize nrows, mwSize ncols) {
  // Row-count check.
  if (nrows > 0) {
    switch (row_comp) {
      case EQ:
        ASSERT_FMT(mxGetM(input[index]) == nrows,
                   "size(input[%d], 1) must be %d.", index, nrows);
        break;
      case GT:
        ASSERT_FMT(mxGetM(input[index]) > nrows,
                   "size(input[%d], 1) must be greater than %d.", index, nrows);
        break;
      case LT:
        ASSERT_FMT(mxGetM(input[index]) < nrows,
                   "size(input[%d], 1) must be less than %d.", index, nrows);
        break;
      case NEQ:
        ASSERT_FMT(mxGetM(input[index]) != nrows,
                   "size(input[%d], 1) must be not equal %d.", index, nrows);
        break;
      case GE:
        ASSERT_FMT(mxGetM(input[index]) >= nrows,
                   "size(input[%d], 1) must be at least %d.", index, nrows);
        break;
      case LE:
        ASSERT_FMT(mxGetM(input[index]) <= nrows,
                   "size(input[%d], 1) can be at most %d.", index, nrows);
        break;
      default:
        break;
    }
  }
  // Column-count check.
  if (ncols > 0) {
    switch (col_comp) {
      case EQ:
        ASSERT_FMT(mxGetN(input[index]) == ncols,
                   "size(input[%d], 2) must be %d.", index, ncols);
        break;
      case GT:
        ASSERT_FMT(mxGetN(input[index]) > ncols,
                   "size(input[%d], 2) must be greater than %d.", index, ncols);
        break;
      case LT:
        ASSERT_FMT(mxGetN(input[index]) < ncols,
                   "size(input[%d], 2) must be less than %d.", index, ncols);
        break;
      case NEQ:
        ASSERT_FMT(mxGetN(input[index]) != ncols,
                   "size(input[%d], 2) must be not equal %d.", index, ncols);
        break;
      case GE:
        ASSERT_FMT(mxGetN(input[index]) >= ncols,
                   "size(input[%d], 2) must be at least %d.", index, ncols);
        break;
      case LE:
        ASSERT_FMT(mxGetN(input[index]) <= ncols,
                   "size(input[%d], 2) can be at most %d.", index, ncols);
        break;
      default:
        break;
    }
  }
  // Element-type check and data-pointer extraction.
  switch (argtype) {
    case kDouble:
      ASSERT_FMT(mxIsDouble(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetPr(input[index]);  // double*
    case kSingle:
      ASSERT_FMT(mxIsSingle(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // float*
    case kStruct:
      ASSERT_FMT(mxIsStruct(input[index]),
                 "Invalid data type for input index %d.", index);
      // TODO
      ERR_EXIT("UnknownDataTypeError", "Not implemented");
      break;
    case kLogical:
      ASSERT_FMT(mxIsLogical(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetLogicals(input[index]);  // mxLogical*
    case kChar:
      ASSERT_FMT(mxIsChar(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetChars(input[index]);  // char*
    case kInt8:
      ASSERT_FMT(mxIsInt8(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int8_t*
    case kUint8:
      ASSERT_FMT(mxIsUint8(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint8_t*
    case kInt16:
      ASSERT_FMT(mxIsInt16(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int16_t*
    case kUint16:
      ASSERT_FMT(mxIsUint16(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint16_t*
    case kInt32:
      ASSERT_FMT(mxIsInt32(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int32_t*
    case kUint32:
      ASSERT_FMT(mxIsUint32(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint32_t*
    default:
      ERR_EXIT("UnknownDataTypeError", "Unknown argtype");
  }
  // Fix: control could previously fall off the end of this non-void
  // function (the kStruct path breaks out of the switch) -- undefined
  // behavior. The error macros above do not return, so this is unreachable
  // in practice, but keeps the function well-formed.
  return NULL;
}
}
// Prints a variable's name, value, address and size on the MATLAB console;
// used by the VAR() macro above.
// NOTE(review): defined after the closing brace of namespace mexutil, so it
// lives in the global namespace while the rest of the helpers are inside
// the namespace -- confirm this placement is intentional.
void DisplayVariable(std::string name, std::string value, size_t size,
                     void *ptr, std::string file, int line, std::string func) {
  MPRINTF("[INFO] (%s:%d %s) %s=%s &%s=%p %d\n", file.c_str(), line,
          func.c_str(), name.c_str(), value.c_str(), name.c_str(), ptr,
          (int)size);
}
|
paraGraph.h | #ifndef __PARAGRAPH_H__
#define __PARAGRAPH_H__
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "vertex_set.h"
#include "graph.h"
#include "mic.h"
#include "ts_hashtable.h"
#include "util.h"
#include <time.h>
#include <immintrin.h>
#define CHUNK_SIZE 32
#define MAX_THREAD_NUM 256
/*
* edgeMap --
*
* Students will implement this function.
*
* The input argument f is a class with the following methods defined:
* bool update(Vertex src, Vertex dst)
* bool cond(Vertex v)
*
* See apps/bfs.cpp for an example of such a class definition.
*
* When the argument removeDuplicates is false, the implementation of
* edgeMap need not remove duplicate vertices from the VertexSet it
* creates when iterating over edges. This is a performance
* optimization when the application knows (and can tell ParaGraph)
* that f.update() guarantees that duplicate vertices cannot appear in
* the output vertex set.
*
* Further notes: the implementation of edgeMap is templated on the
* type of this object, which allows for higher performance code
* generation as these methods will be inlined.
*/
// Applies f over the edges leaving the frontier u and returns the set of
// destination vertices for which f.cond()/f.update() succeeded.
// Chooses a sparse (top-down) strategy for small frontiers and a dense
// (bottom-up) strategy otherwise.
template <class F>
static VertexSet *edgeMap(Graph g, VertexSet *u, F &f,
    bool removeDuplicates=true)
{
  // outputSubset = {}
  // foreach u in U: (in parallel)
  // for each outgoing edge (u,v) from u: (in parallel)
  // if (C(v) && F(u,v))
  // outputSubset.append(v)
  // remove_duplicates(outputSubset)
  // return outputSubset

  // Per-thread edge counts / output sizes, later turned into offsets by an
  // exclusive scan. NOTE(review): static arrays make this function
  // non-reentrant; confirm edgeMap is never called from nested parallelism.
  static int edge_counts[MAX_THREAD_NUM];
  static int edge_sizes[MAX_THREAD_NUM];
  int size = u -> size;
  int total_num = num_nodes(g);
  VertexSet* ret;
  bool need_free = false;
  if(size < total_num / 100) {
    // ensure u is SPARSE
    if(u -> type != SPARSE) {
      u = ConvertDenseToSparse(u);
      need_free = true;
    }
    ts_hashtable * hash_table;
    int max_threads = omp_get_max_threads();
    Vertex * vertices = u -> vertices;
    // Pass 1: count outgoing edges per thread block to size the buffer.
    #pragma omp parallel
    {
      int numThreads = omp_get_num_threads();
      int blockSize = (size + numThreads - 1)/ numThreads;
      int tid = omp_get_thread_num();
      int start = blockSize * tid;
      int end = start + blockSize;
      end = end > size ? size : end;
      int localCount = 0;
      for (int i = start; i < end; i++) {
        int diff = outgoing_size(g, vertices[i]);
        localCount += diff;
      }
      // NOTE(review): "#pragma vector nontemporal" is an Intel compiler
      // streaming-store hint; other compilers ignore it.
      #pragma vector nontemporal(edge_counts)
      edge_counts[tid] = localCount;
    }
    int numNextPow2 = nextPow2(max_threads + 1);
    exclusive_scan(edge_counts, numNextPow2);
    int capacity = edge_counts[max_threads];
    int * edges = (int *)malloc(sizeof(int) * (capacity + 1));
    // top down approach
    if(removeDuplicates)
      hash_table = new_hashtable(capacity | 1); //odd number capacity
    // Pass 2: apply cond/update and collect accepted destinations into the
    // per-thread region of the shared edges buffer.
    #pragma omp parallel
    {
      int numThreads = omp_get_num_threads();
      int blockSize = (size + numThreads - 1)/ numThreads;
      int tid = omp_get_thread_num();
      int start = blockSize * tid;
      int end = start + blockSize;
      end = end > size ? size : end;
      int localSize = 0;
      int localOffset;
      #pragma vector nontemporal(edge_counts)
      localOffset = edge_counts[tid];
      for (int i = start; i < end; i++) {
        const Vertex v_i = vertices[i];
        const Vertex* start = outgoing_begin(g, v_i);
        const Vertex* end = outgoing_end(g, v_i);
        for (const Vertex* k = start; k != end; k++) {
          // hashtable_set is only evaluated when removeDuplicates is true
          // (short-circuit), so hash_table is never read uninitialized.
          if (f.cond(*k) && f.update(v_i, *k) &&
              (!removeDuplicates || !hashtable_set(hash_table, *k))) {
            edges[localOffset + localSize] = *k;
            localSize++;
          }
        }
      }
      #pragma vector nontemporal(edge_sizes)
      edge_sizes[tid] = localSize;
    }
    // Compact the per-thread regions into one contiguous sparse result.
    exclusive_scan(edge_sizes, numNextPow2);
    capacity = edge_sizes[max_threads];
    ret = newVertexSet(SPARSE, capacity, total_num);
    #pragma omp parallel for schedule(static)
    for(int i = 0; i < max_threads; i++) {
      int edge_sizestart = edge_sizes[i];
      int edgeStart = edge_counts[i];
      int length = edge_sizes[i + 1] - edge_sizes[i];
      if(length > 0)
        memcpy(&ret -> vertices[edge_sizestart], &edges[edgeStart], sizeof(int) * length);
    }
    setSize(ret, capacity);
    free(edges);
    if(removeDuplicates)
      hashtable_free(hash_table);
  }
  else {
    // ensure u is DENSE
    if(u -> type != DENSE) {
      u = ConvertSparseToDense(u);
      need_free = true;
    }
    // buttom up approach: every destination vertex scans its incoming
    // edges; each thread owns whole 32-vertex chunks of the bitmap, so the
    // mapValue writes do not race.
    ret = newVertexSet(DENSE, size, total_num);
    // Vertex is typedef'ed as int
    int total_size = 0;
    #pragma omp parallel for schedule(dynamic, 32) reduction(+:total_size)
    for(Vertex chunk = 0; chunk < total_num; chunk+=CHUNK_SIZE) {
      int mapValue = 0;
      for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < total_num; i++) {
        bool hasAdded = false;
        const Vertex* k = incoming_begin(g, i);
        const Vertex* end = incoming_end(g, i);
        // Stop scanning as soon as cond(i) becomes false (e.g. vertex
        // already visited in BFS).
        while(f.cond(i) && k != end) {
          if ((u -> size == u -> numNodes || DenseHasVertex(u, *k))
              && f.update(*k, i) && !hasAdded) {
            hasAdded = true;
            mapValue |= 1 << (i - chunk);
            total_size += 1;
          }
          k++;
        }
      }
      DenseSetMapValue(ret, chunk / CHUNK_SIZE, mapValue);
    }
    setSize(ret, total_size);
  }
  if(need_free)
    freeVertexSet(u);
  return ret;
}
/*
* vertexMap --
*
* Students will implement this function.
*
* The input argument f is a class with the following methods defined:
* bool operator()(Vertex v)
*
* See apps/kBFS.cpp for an example implementation.
*
* Note that you'll call the function on a vertex as follows:
* Vertex v;
* bool result = f(v)
*
* If returnSet is false, then the implementation of vertexMap should
* return NULL (it need not build and create a vertex set)
*/
// Applies f to every vertex in u; when returnSet is true, returns the subset
// of u for which f returned true, otherwise returns NULL.
template <class F>
static VertexSet *vertexMap(VertexSet *u, F &f, bool returnSet=true)
{
  // 1. apply F to all vertices in U
  // 2. return a new vertex subset containing all vertices u in U
  // for which F(u) == true
  int size = u -> size;
  int numNodes = u -> numNodes;
  if(u -> type == SPARSE) {
    Vertex * vertices = u -> vertices;
    if (returnSet) {
      VertexSet* ret = newVertexSet(SPARSE, size, numNodes);
      // NOTE(review): addVertex is called on the shared result set from
      // inside the parallel for -- this is only safe if addVertex is
      // internally synchronized/atomic; confirm in vertex_set.h.
      #pragma omp parallel for
      for (int i = 0; i < size; i++) {
        if (f(vertices[i])) {
          addVertex(ret, vertices[i]);
        }
      }
      return ret;
    }
    else {
      // Side-effect-only pass; no result set is built.
      #pragma omp parallel for
      for (int i = 0; i < size; i++) {
        f(vertices[i]);
      }
      return NULL;
    }
  }
  else {
    if (returnSet) {
      int total_size = 0;
      VertexSet* ret = newVertexSet(DENSE, size, numNodes);
      // Each thread owns whole 32-vertex chunks of the dense bitmap, so
      // the mapValue writes do not race.
      #pragma omp parallel for schedule(static) reduction(+:total_size)
      for(int chunk = 0; chunk < numNodes; chunk+=CHUNK_SIZE) {
        int mapValue = 0;
        for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < numNodes; i++) {
          if (DenseHasVertex(u, i) && f(i)) {
            mapValue |= 1 << (i - chunk);
            total_size += 1;
          }
        }
        DenseSetMapValue(ret, chunk / CHUNK_SIZE, mapValue);
      }
      setSize(ret, total_size);
      return ret;
    }
    else {
      // Side-effect-only pass over the members of the dense bitmap.
      #pragma omp parallel for schedule(static)
      for(int chunk = 0; chunk < numNodes; chunk+=CHUNK_SIZE) {
        int base = chunk / CHUNK_SIZE;
        int map = DenseGetMapValue(u, base);
        for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < numNodes; i++) {
          if((map & (1 << (i-chunk))))
            f(i);
        }
      }
      return NULL;
    }
  }
}
#endif /* __PARAGRAPH_H__ */
|
ab-totient-omp-4.c | // Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end
#include <stdio.h>
#include <omp.h>
/* When input is a prime number, the totient is simply the prime number - 1. Totient is always even (except for 1).
If n is a positive integer, then φ(n) is the number of integers k in the range 1 ≤ k ≤ n for which gcd(n, k) = 1 */
/* Euler's totient via trial division: for every prime factor p of number,
   multiply the running result by (1 - 1/p), i.e. subtract result/p. */
long getTotient (long number) {
	long result = number;
	long factor = 2;

	/* Try 2, then every odd candidate up to sqrt(number). */
	while (factor * factor <= number) {
		if (number % factor == 0) {
			result -= result / factor;
			/* Strip the factor completely so it is counted only once. */
			while (number % factor == 0)
				number /= factor;
		}
		factor = (factor == 2) ? 3 : factor + 2;
	}

	/* Whatever remains above 1 is a single prime factor > sqrt(n). */
	if (number > 1)
		result -= result / number;

	return result;
}
// Main method.
// Parses the range [lower, upper] from the command line and prints the sum
// of Euler totients over that range.
int main(int argc, char ** argv) {
	long lower, upper;
	// Fix: the original dereferenced argv[1]/argv[2] unchecked and ignored
	// the sscanf results -- undefined behavior on missing/garbage input.
	if (argc < 3 ||
	    sscanf(argv[1], "%ld", &lower) != 1 ||
	    sscanf(argv[2], "%ld", &upper) != 1) {
		fprintf(stderr, "Usage: %s range_start range_end\n", argv[0]);
		return 1;
	}
	// Fix: the loop counter was `int` while the bounds are `long`, which
	// overflows for large ranges; and `result` was initialized with the
	// double literal 0.0.
	long i;
	long result = 0;
	// We know the answer if it's 1; no need to execute the function
	if (lower == 1) {
		result = 1;
		lower = 2;
	}
	// Sum all totients in the specified range
	#pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(4)
	for (i = lower; i <= upper; i++) {
		result = result + getTotient(i);
	}
	// Print the result
	printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);
	// A-OK!
	return 0;
}
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
  /* indexes 0 and 5 remain unchanged; permute the middle four in place */
  double ry = affine[1];
  double rx = affine[2];
  double sy = affine[3];
  double tx = affine[4];

  affine[1]=rx;
  affine[2]=tx;
  affine[3]=ry;
  affine[4]=sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */
  /* indexes 0 and 5 remain unchanged; inverse of AffineArgsToCoefficients */
  double c1 = coeff[1];
  double c2 = coeff[2];
  double c3 = coeff[3];
  double c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
/* Computes the inverse of the 2x3 affine transform in coeff (layout
   c0..c5 = sx,rx,tx,ry,sy,ty applied as u=c0*x+c1*y+c2, v=c3*x+c4*y+c5).
   PerceptibleReciprocal guards against a singular (near-zero) determinant. */
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=determinant*coeff[4];
  inverse[1]=determinant*(-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=determinant*(-coeff[3]);
  inverse[4]=determinant*coeff[0];
  inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /*
    Compute the inverse of a perspective (projective) mapping given as
    coefficients c0..c7, up to an overall scale factor.
    From "Digital Image Warping" by George Wolberg, page 53.
  */
  double
    reciprocal;
  /* reciprocal of the upper-left 2x2 determinant; PerceptibleReciprocal()
     guards against a singular mapping */
  reciprocal=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=reciprocal*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=reciprocal*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=reciprocal*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=reciprocal*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=reciprocal*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=reciprocal*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=reciprocal*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=reciprocal*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms of a 2d polynomial of the given order.
    Valid orders are the integers 1 to 5, or the special value 1.5
    (a bilinear polynomial); returns 0 for an invalid order.
  */
  if ( order < 1 || order > 5 )
    return 0; /* invalid polynomial order */
  /* A non-integer order is only valid if it is (within epsilon of) 1.5.
     Note: fabs() is required here; without it non-integer orders below
     1.5 (e.g. 1.3) were silently accepted. */
  if ( order != floor(order) && fabs(order-1.5) > MagickEpsilon )
    return 0; /* invalid polynomial order */
  /* number of terms of a full 2d polynomial: (order+1)(order+2)/2 */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Evaluate the n'th 2d polynomial basis term at (x,y).  Term ordering
    matches poly_number_terms(): constant, affine, the bilinear cross
    term, then the quadratic through quintic groups.  Returns 0 for an
    out-of-range term index (should never happen).
  */
  static const struct { int x_power, y_power; } powers[21] =
  {
    { 0, 0 },                                          /* constant */
    { 1, 0 }, { 0, 1 },                                /* affine, order 1 */
    { 1, 1 },                                          /* bilinear, order 1.5 */
    { 2, 0 }, { 0, 2 },                                /* quadratic, order 2 */
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },            /* cubic, order 3 */
    { 4, 0 }, { 3, 1 }, { 2, 2 }, { 1, 3 }, { 0, 4 },  /* quartic, order 4 */
    { 5, 0 }, { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 0, 5 }                                           /* quintic, order 5 */
  };
  double
    result;
  int
    k;
  if ( n < 0 || n > 20 )
    return( 0 ); /* should never happen */
  /* multiply the x factors first, then the y factors, matching the
     original left-to-right x*x*...*y*y evaluation order exactly */
  result=1.0;
  for (k=0; k < powers[n].x_power; k++)
    result*=x;
  for (k=0; k < powers[n].y_power; k++)
    result*=y;
  return( result );
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Return a human readable string for the n'th polynomial basis term,
    using 'ii' and 'jj' as the x and y variables (suitable for building
    FX-style expressions).  Returns "UNKNOWN" for an out-of-range index
    (should never happen).
  */
  static const char
    *term_strings[21] =
    {
      "",                                                 /* constant */
      "*ii", "*jj",                                       /* affine, order 1 */
      "*ii*jj",                                           /* bilinear, order 1.5 */
      "*ii*ii", "*jj*jj",                                 /* quadratic, order 2 */
      "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj", /* cubic, order 3 */
      "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
      "*ii*jj*jj*jj", "*jj*jj*jj*jj",                     /* quartic, order 4 */
      "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj",
      "*jj*jj*jj*jj*jj"                                   /* quintic, order 5 */
    };
  if ( n < 0 || n > 20 )
    return( "UNKNOWN" ); /* should never happen */
  return( term_strings[n] );
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    Partial derivative of the n'th polynomial basis term with respect
    to x.  Constant scaling factors are deliberately dropped, exactly
    as in the original term-by-term expansion.  A negative x_power in
    the table marks terms whose x derivative is zero.
  */
  static const struct { int x_power, y_power; } dx_powers[21] =
  {
    { -1, 0 },                                         /* constant -> 0 */
    {  0, 0 }, { -1, 0 },                              /* affine, order 1 */
    {  0, 1 },                                         /* bilinear, order 1.5 */
    {  1, 0 }, { -1, 0 },                              /* quadratic, order 2 */
    {  2, 0 }, {  1, 1 }, {  0, 2 }, { -1, 0 },        /* cubic, order 3 */
    {  3, 0 }, {  2, 1 }, {  1, 2 }, {  0, 3 },
    { -1, 0 },                                         /* quartic, order 4 */
    {  4, 0 }, {  3, 1 }, {  2, 2 }, {  1, 3 },
    {  0, 4 }, { -1, 0 }                               /* quintic, order 5 */
  };
  double
    result;
  int
    k;
  if ( n < 0 || n > 20 || dx_powers[n].x_power < 0 )
    return( 0.0 ); /* zero derivative, or should never happen */
  /* x factors first, then y factors, matching original evaluation order */
  result=1.0;
  for (k=0; k < dx_powers[n].x_power; k++)
    result*=x;
  for (k=0; k < dx_powers[n].y_power; k++)
    result*=y;
  return( result );
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    Partial derivative of the n'th polynomial basis term with respect
    to y, again with constant scaling factors dropped.  For terms after
    the quadratic group the y derivative of term n equals the x
    derivative of term n-1 (a consequence of the term ordering); that
    relation has been expanded here into an explicit per-term switch.
    The only reason it does not also hold within the quadratic group is
    the re-arrangement of terms to accommodate the bilinear case.
  */
  switch(n) {
    case  0: return( 0.0 );     /* constant */
    case  1: return( 0.0 );
    case  2: return( 1.0 );     /* affine order = 1 terms = 3 */
    case  3: return( x );       /* bilinear order = 1.5 terms = 4 */
    case  4: return( 0.0 );
    case  5: return( y );       /* quadratic order = 2 terms = 6 */
    case  6: return( 0.0 );
    case  7: return( x*x );
    case  8: return( x*y );
    case  9: return( y*y );     /* cubic order = 3 terms = 10 */
    case 10: return( 0.0 );
    case 11: return( x*x*x );
    case 12: return( x*x*y );
    case 13: return( x*y*y );
    case 14: return( y*y*y );   /* quartic order = 4 terms = 15 */
    case 15: return( 0.0 );
    case 16: return( x*x*x*x );
    case 17: return( x*x*x*y );
    case 18: return( x*x*y*y );
    case 19: return( x*y*y*y );
    case 20: return( y*y*y*y ); /* quintic order = 5 terms = 21 */
    default: return( 0.0 );     /* should never happen */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];
  Image
    *distort_image;
  /*
    Affine transform the image: hand the forward affine matrix to
    DistortImage() as an AffineProjection distortion, which solves the
    required reverse mapping internally.  Returns a new image (caller
    owns it), or NULL on failure with 'exception' set.
  */
  assert(image != (Image *) NULL);  /* validate before dereferencing */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix in the sx,rx,ry,sy,tx,ty order that
     AffineProjectionDistortion expects */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  distort_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficients are used to hold global polynomial information
c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bottom edge to this radius (radial scaling)
By default, if the radii arguments are not provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 are the sanitized version of the first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a cylinder and a flat plane from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without a transparency channel.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
  Image
    *resize_image,
    *tmp_image;
  RectangleInfo
    crop_area;
  double
    distort_args[12];
  VirtualPixelMethod
    vp_save;
  /*
    Distort resize image: perform the resize as an affine distortion (EWA
    cylindrical resampling), then crop the result to exactly the requested
    columns x rows so virtual pixels do not bleed into the final image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) memset(distort_args,0,12*sizeof(double));
  /* affine scaling control points: (0,0)->(0,0), (w,0)->(cols,0),
     (0,h)->(0,rows) -- only the non-zero coordinates need setting */
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      /* inherit from the image this function produced, matching the
         transparency branch below */
      InheritException(exception,&resize_image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;
      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_alpha == (Image *) NULL )
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the distortion: crop away the over-sized
    'bestfit' canvas so the final image is exactly columns x rows.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->matte=image->matte;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can effect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if (method == ResizeDistortion)
{
if (number_arguments != 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t) arguments[0],
(size_t) arguments[1],exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0,
exception);
if (coeff == (double *) NULL)
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
  /* A user-provided 'scale' expert option scales the output image size
     by the given factor, allowing for super-sampling of the distorted
     image space.  Any scaling factors must naturally be halved as a
     result.
  */
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*magick_restrict indexes;
register ssize_t
i;
register PixelPacket
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *clone_image,
    *rotate_image;

  MagickRealType
    residual;

  PointInfo
    shear;

  size_t
    quadrants;

  /*
    Reduce the requested rotation to a residual angle in (-45,45] degrees
    plus a count of lossless 90-degree quadrant turns.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  residual=fmod(degrees,360.0);
  while (residual < -45.0)
    residual+=360.0;
  quadrants=0;
  while (residual > 45.0)
  {
    residual-=90.0;
    quadrants++;
  }
  quadrants%=4;
  /*
    A (near) zero shear means the rotation is an exact multiple of 90
    degrees, which can be performed exactly without resampling.
  */
  shear.x=(-tan((double) DegreesToRadians(residual)/2.0));
  shear.y=sin((double) DegreesToRadians(residual));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,quadrants,exception));
  /*
    General angle: delegate to DistortImage() with a Scale-Rotate-Translate
    distortion; background virtual pixels fill the uncovered corners.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
  rotate_image=DistortImage(clone_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  clone_image=DestroyImage(clone_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
%    The methods used for SparseColor() are often similar to methods
%    used for DistortImage(), and even share the same code for determination
%    of the function coefficients, though with more dimensions (or resulting
%    values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const ChannelType channel,const SparseColorMethod method,
const size_t number_arguments,const double *arguments,
ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ( channel & RedChannel ) number_colors++;
if ( channel & GreenChannel ) number_colors++;
if ( channel & BlueChannel ) number_colors++;
if ( channel & IndexChannel ) number_colors++;
if ( channel & OpacityChannel ) number_colors++;
/*
    Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortImageMethod
distort_method;
distort_method=(DistortImageMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Note some Distort Methods may fall back to other simpler methods,
Currently the only fallback of concern is Bilinear to Affine
    (Barycentric), which is also a sparse_color method.  This also ensures
correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
/* Generate new image for generated interpolated gradient.
* ASIDE: Actually we could have just replaced the colors of the original
* image, but IM Core policy, is if storage class could change then clone
* the image.
*/
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
InheritException(exception,&image->exception);
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel; /* pixel to assign to distorted image */
register IndexPacket
*magick_restrict indexes;
register ssize_t
i;
register PixelPacket
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
GetMagickPixelPacket(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
SetMagickPixelPacket(image,q,indexes,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
register ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
if ( channel & RedChannel ) pixel.red = 0.0;
if ( channel & GreenChannel ) pixel.green = 0.0;
if ( channel & BlueChannel ) pixel.blue = 0.0;
if ( channel & IndexChannel ) pixel.index = 0.0;
if ( channel & OpacityChannel ) pixel.opacity = 0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
register ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ( channel & RedChannel )
pixel.red += arguments[x++]*weight;
if ( channel & GreenChannel )
pixel.green += arguments[x++]*weight;
if ( channel & BlueChannel )
pixel.blue += arguments[x++]*weight;
if ( channel & IndexChannel )
pixel.index += arguments[x++]*weight;
if ( channel & OpacityChannel )
pixel.opacity += arguments[x++]*weight;
denominator += weight;
}
if ( channel & RedChannel ) pixel.red /= denominator;
if ( channel & GreenChannel ) pixel.green /= denominator;
if ( channel & BlueChannel ) pixel.blue /= denominator;
if ( channel & IndexChannel ) pixel.index /= denominator;
if ( channel & OpacityChannel ) pixel.opacity /= denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ( channel & RedChannel ) pixel.red = arguments[x++];
if ( channel & GreenChannel ) pixel.green = arguments[x++];
if ( channel & BlueChannel ) pixel.blue = arguments[x++];
if ( channel & IndexChannel ) pixel.index = arguments[x++];
if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ( channel & RedChannel ) pixel.red = arguments[x++];
if ( channel & GreenChannel ) pixel.green = arguments[x++];
if ( channel & BlueChannel ) pixel.blue = arguments[x++];
if ( channel & IndexChannel ) pixel.index = arguments[x++];
if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the color directly back into the source image */
if ( channel & RedChannel )
pixel.red=ClampPixel(QuantumRange*pixel.red);
if ( channel & GreenChannel )
pixel.green=ClampPixel(QuantumRange*pixel.green);
if ( channel & BlueChannel )
pixel.blue=ClampPixel(QuantumRange*pixel.blue);
if ( channel & IndexChannel )
pixel.index=ClampPixel(QuantumRange*pixel.index);
if ( channel & OpacityChannel )
pixel.opacity=ClampPixel(QuantumRange*pixel.opacity);
SetPixelPacket(sparse_image,&pixel,q,indexes);
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SparseColorImage)
#endif
proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
getStartLists.c | #include "defs.h"
/*
 * SSCA#2 kernel: find every edge of maximum weight in graph G.
 *
 * Each thread scans its share of the vertices (CSR arrays G->numEdges /
 * G->endV / G->weight), tracking a per-thread maximum weight and a
 * per-thread partial list of candidate edges.  The partial lists of the
 * threads that actually saw the global maximum are then merged (via a
 * prefix sum over per-thread counts) into a single list returned through
 * *maxIntWtListPtr / *maxIntWtListSizePtr.  Returns elapsed wall time.
 *
 * The mcsim_* calls are McSim simulator instrumentation, not program
 * logic: skip_instrs_begin/end bracket bookkeeping excluded from
 * simulation, tx_begin/tx_end bracket failure-atomic store groups, and
 * the BASELINE/UNDOLOG/REDOLOG/CLWB conditional blocks model different
 * persistence schemes (logging and cache-line write-back) for the stores
 * inside those groups.  Statement order around the fences is deliberate.
 */
double getStartLists(graph* G, edge** maxIntWtListPtr,
        INT_T* maxIntWtListSizePtr)
{
    mcsim_skip_instrs_begin();
    LONG_T *local_max, maxWeight;   /* per-thread maxima; global maximum */
    edge *maxIntWtList;             /* merged result list */
    LONG_T maxIntWtListSize;
    LONG_T *p_start, *p_end;        /* per-thread [start,end) slots in merged list */
    double elapsed_time;
    elapsed_time = get_seconds();
#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
    {
#endif
        LONG_T i, j, n;
        edge* pList;                /* this thread's partial candidate list */
        LONG_T pCount, tmpListSize;
        int tid, nthreads;
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
#ifdef _OPENMP
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif
        n = G->n;
        /* Determine the maximum edge weight */
        if (tid == 0) {
            local_max = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }
        /* Allocate memory for partial edge list on each thread */
        /* NOTE(review): pList is fixed at 1000 entries and the writes below
         * are not bounds-checked against tmpListSize -- assumes the number
         * of max-weight edges per thread stays under 1000; confirm. */
        tmpListSize = 1000;
        pList = (edge *) malloc(tmpListSize*sizeof(edge));
        pCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#endif
        local_max[tid] = -1;
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds();
        }
#endif
        mcsim_skip_instrs_end();
        /* Phase 1: scan edges, keeping the running per-thread maximum and
         * the edges that attain it.  A new maximum resets pCount. */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            for (j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
                if (G->weight[j] > local_max[tid]) {
                    mcsim_tx_begin();
#ifdef BASELINE
                    mcsim_log_begin();
                    //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    LONG_T *undolog_local_max;
                    undolog_local_max = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
                    undolog_local_max[tid] = local_max[tid];
#endif // UNDOLOG
#ifdef REDOLOG
                    LONG_T *redolog_local_max;
                    redolog_local_max = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
                    redolog_local_max[tid] = G->weight[j];
#endif // REDOLOG
                    //mcsim_skip_instrs_end();
                    mcsim_mem_fence();
                    mcsim_log_end();
                    mcsim_mem_fence();
#endif // BASELINE
                    local_max[tid] = G->weight[j];
                    mcsim_tx_end();
#ifdef CLWB
                    mcsim_clwb( &( local_max[tid] ) );
#endif // CLWB
                    /* new maximum invalidates all previously collected edges */
                    pCount = 0;
                    mcsim_tx_begin();
#ifdef BASELINE
                    mcsim_log_begin();
                    //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    edge *undolog_pList;
                    undolog_pList = (edge *) malloc(tmpListSize*sizeof(edge));
                    undolog_pList[pCount].startVertex = pList[pCount].startVertex;
                    undolog_pList[pCount].endVertex = pList[pCount].endVertex;
                    undolog_pList[pCount].w = pList[pCount].w;
                    undolog_pList[pCount].e = pList[pCount].e;
#endif // UNDOLOG
#ifdef REDOLOG
                    edge *redolog_pList;
                    redolog_pList = (edge *) malloc(tmpListSize*sizeof(edge));
                    redolog_pList[pCount].startVertex = i;
                    redolog_pList[pCount].endVertex = G->endV[j];
                    redolog_pList[pCount].w = local_max[tid];
                    redolog_pList[pCount].e = j;
#endif // REDOLOG
                    //mcsim_skip_instrs_end();
                    mcsim_mem_fence();
                    mcsim_log_end();
                    mcsim_mem_fence();
#endif // BASELINE
                    pList[pCount].startVertex = i;
                    pList[pCount].endVertex = G->endV[j];
                    pList[pCount].w = local_max[tid];
                    pList[pCount].e = j;
                    mcsim_tx_end();
#ifdef CLWB
                    mcsim_clwb( &( pList[pCount].startVertex ) );
                    mcsim_clwb( &( pList[pCount].endVertex ) );
                    mcsim_clwb( &( pList[pCount].w ) );
                    mcsim_clwb( &( pList[pCount].e ) );
#endif // CLWB
                    pCount++;
                    // make sure undolog and redolog data structures are not discarded by compiler
                    mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    printf("%d\n", (int)((sizeof undolog_local_max) + (sizeof undolog_pList)));
#endif // UNDOLOG
#ifdef REDOLOG
                    printf("%d\n", (int)((sizeof redolog_local_max) + (sizeof redolog_pList)));
#endif // REDOLOG
                    mcsim_skip_instrs_end();
                } else if (G->weight[j] == local_max[tid]) {
                    /* edge ties the current maximum: append it */
                    mcsim_tx_begin();
#ifdef BASELINE
                    mcsim_log_begin();
                    //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    edge *undolog_pList;
                    undolog_pList = (edge *) malloc(tmpListSize*sizeof(edge));
                    undolog_pList[pCount].startVertex = pList[pCount].startVertex;
                    undolog_pList[pCount].endVertex = pList[pCount].endVertex;
                    undolog_pList[pCount].w = pList[pCount].w;
                    undolog_pList[pCount].e = pList[pCount].e;
#endif // UNDOLOG
#ifdef REDOLOG
                    edge *redolog_pList;
                    redolog_pList = (edge *) malloc(tmpListSize*sizeof(edge));
                    redolog_pList[pCount].startVertex = i;
                    redolog_pList[pCount].endVertex = G->endV[j];
                    redolog_pList[pCount].w = local_max[tid];
                    redolog_pList[pCount].e = j;
#endif // REDOLOG
                    //mcsim_skip_instrs_end();
                    mcsim_mem_fence();
                    mcsim_log_end();
                    mcsim_mem_fence();
#endif // BASELINE
                    pList[pCount].startVertex = i;
                    pList[pCount].endVertex = G->endV[j];
                    pList[pCount].w = local_max[tid];
                    pList[pCount].e = j;
                    mcsim_tx_end();
#ifdef CLWB
                    mcsim_clwb( &( pList[pCount].startVertex ) );
                    mcsim_clwb( &( pList[pCount].endVertex ) );
                    mcsim_clwb( &( pList[pCount].w ) );
                    mcsim_clwb( &( pList[pCount].e ) );
#endif // CLWB
                    pCount++;
                    // make sure undolog and redolog data structures are not discarded by compiler
                    mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    printf("%d\n", (int)(sizeof undolog_pList));
#endif // UNDOLOG
#ifdef REDOLOG
                    printf("%d\n", (int)(sizeof redolog_pList));
#endif // REDOLOG
                    mcsim_skip_instrs_end();
                }
            }
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Phase 2: thread 0 reduces the per-thread maxima to maxWeight. */
        if (tid == 0) {
#ifdef DIAGNOSTIC
            if (tid == 0) {
                elapsed_time_part = get_seconds() - elapsed_time_part;
                fprintf(stderr, "Max. weight computation time: %lf seconds\n", elapsed_time_part);
            }
#endif
            maxWeight = local_max[0];
            for (i=1; i<nthreads; i++) {
                if (local_max[i] > maxWeight)
                    maxWeight = local_max[i];
            }
            // free(local_max);
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Threads whose local maximum lost to the global one discard
         * their candidates. */
        if (local_max[tid] != maxWeight) {
            pCount = 0;
        }
        mcsim_skip_instrs_begin();
        /* Merge all the partial edge lists */
        if (tid == 0) {
            p_start = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
            p_end = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        p_end[tid] = pCount;
        p_start[tid] = 0;
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Exclusive prefix sum over per-thread counts gives each thread
         * its slot [p_start[tid], p_end[tid]) in the merged list. */
        if (tid == 0) {
            for (i=1; i<nthreads; i++) {
                p_end[i] = p_end[i-1] + p_end[i];
                p_start[i] = p_end[i-1];
            }
            maxIntWtListSize = p_end[nthreads-1];
            free(*maxIntWtListPtr);
            maxIntWtList = (edge *) malloc((maxIntWtListSize)*sizeof(edge));
        }
        mcsim_skip_instrs_end();
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Phase 3: each thread copies its surviving candidates into its
         * slot of the merged list. */
        for (j=p_start[tid]; j<p_end[tid]; j++) {
            mcsim_tx_begin();
#ifdef BASELINE
            mcsim_log_begin();
            //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
            edge *undolog_maxIntWtList;
            undolog_maxIntWtList = (edge *) malloc((maxIntWtListSize)*sizeof(edge));
            (undolog_maxIntWtList[j]).startVertex = (maxIntWtList[j]).startVertex;
            (undolog_maxIntWtList[j]).endVertex = (maxIntWtList[j]).endVertex;
            (undolog_maxIntWtList[j]).e = (maxIntWtList[j]).e;
            (undolog_maxIntWtList[j]).w = (maxIntWtList[j]).w;
#endif // UNDOLOG
#ifdef REDOLOG
            edge *redolog_maxIntWtList;
            redolog_maxIntWtList = (edge *) malloc((maxIntWtListSize)*sizeof(edge));
            (redolog_maxIntWtList[j]).startVertex = pList[j-p_start[tid]].startVertex;
            (redolog_maxIntWtList[j]).endVertex = pList[j-p_start[tid]].endVertex;
            (redolog_maxIntWtList[j]).e = pList[j-p_start[tid]].e;
            (redolog_maxIntWtList[j]).w = pList[j-p_start[tid]].w;
#endif // REDOLOG
            //mcsim_skip_instrs_end();
            mcsim_mem_fence();
            mcsim_log_end();
            mcsim_mem_fence();
#endif // BASELINE
            (maxIntWtList[j]).startVertex = pList[j-p_start[tid]].startVertex;
            (maxIntWtList[j]).endVertex = pList[j-p_start[tid]].endVertex;
            (maxIntWtList[j]).e = pList[j-p_start[tid]].e;
            (maxIntWtList[j]).w = pList[j-p_start[tid]].w;
            mcsim_tx_end();
#ifdef CLWB
            mcsim_clwb( &( (maxIntWtList[j]).startVertex ) );
            mcsim_clwb( &( (maxIntWtList[j]).endVertex ) );
            mcsim_clwb( &( (maxIntWtList[j]).e ) );
            mcsim_clwb( &( (maxIntWtList[j]).w ) );
#endif // CLWB
            mcsim_skip_instrs_begin();
#ifdef UNDOLOG
            printf("%d\n", (int)(sizeof undolog_maxIntWtList));
#endif // UNDOLOG
#ifdef REDOLOG
            printf("%d\n", (int)(sizeof redolog_maxIntWtList));
#endif // REDOLOG
            mcsim_skip_instrs_end();
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        mcsim_skip_instrs_begin();
        free(pList);
        /* Thread 0 publishes the merged list through the out-parameters. */
        if (tid == 0) {
            free(local_max);
            free(p_start);
            free(p_end);
            *maxIntWtListPtr = maxIntWtList;
            *maxIntWtListSizePtr = maxIntWtListSize;
        }
#ifdef _OPENMP
    }
#endif
    /* Verification */
#if 0
    maxIntWtList = *maxIntWtListPtr;
    for (int i=0; i<*maxIntWtListSizePtr; i++) {
        fprintf(stderr, "[%ld %ld %ld %ld] ", maxIntWtList[i].startVertex,
            maxIntWtList[i].endVertex, maxIntWtList[i].e, maxIntWtList[i].w);
    }
#endif
    elapsed_time = get_seconds() - elapsed_time;
    mcsim_skip_instrs_end();
    return elapsed_time;
}
|
Parallel.h | #pragma once
#include <ATen/ATen.h>

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <numeric>
#include <vector>

#ifdef _OPENMP
#include <omp.h>
#endif
namespace at {
namespace internal {
// This parameter is heuristically chosen to determine the minimum number of
// work that warrants parallelism. For example, when summing an array, it is
// deemed inefficient to parallelise over arrays shorter than 32768. Further,
// no parallel algorithm (such as parallel_reduce) should split work into
// smaller than GRAIN_SIZE chunks.
constexpr int64_t GRAIN_SIZE = 32768;
} // namespace internal
// Ceiling division for non-negative x: smallest q such that q * y >= x.
inline int64_t divup(int64_t x, int64_t y) {
  const int64_t q = (x + y - 1) / y;
  return q;
}
// Upper bound on the number of threads OpenMP may use; 1 when built
// without OpenMP support.
inline int get_max_threads() {
#ifdef _OPENMP
  const int nthreads = omp_get_max_threads();
  return nthreads;
#else
  return 1;
#endif
}
// Index of the calling thread within the current OpenMP team; 0 outside
// a parallel region, and always 0 without OpenMP.
inline int get_thread_num() {
#ifdef _OPENMP
  const int tid = omp_get_thread_num();
  return tid;
#else
  return 0;
#endif
}
// True when called from inside an active OpenMP parallel region; always
// false without OpenMP.
inline bool in_parallel_region() {
#ifdef _OPENMP
  return omp_in_parallel() != 0;
#else
  return false;
#endif
}
/*
  parallel_for
  Invokes f(chunk_begin, chunk_end) over [begin, end), splitting the range
  evenly across the OpenMP thread team when the range is at least
  grain_size elements long and we are not already nested inside a parallel
  region.  The first exception thrown by any chunk is captured and
  rethrown on the calling thread after the region; later exceptions are
  dropped.  Without OpenMP, f is invoked once over the whole range.
*/
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
#ifdef _OPENMP
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
#pragma omp parallel if (!omp_in_parallel() && ((end - begin) >= grain_size))
  {
    const int64_t nthreads = omp_get_num_threads();
    const int64_t tid = omp_get_thread_num();
    const int64_t chunk = divup((end - begin), nthreads);
    const int64_t chunk_begin = begin + tid * chunk;
    if (chunk_begin < end) {
      const int64_t chunk_end = std::min(end, chunk_begin + chunk);
      try {
        f(chunk_begin, chunk_end);
      } catch (...) {
        // Only the first thread to fail records its exception.
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
#else
  if (begin < end) {
    f(begin, end);
  }
#endif
}
/*
parallel_reduce
begin: index at which to start applying reduction
end: index at which to stop applying reduction
grain_size: number of elements per chunk. impacts number of elements in
intermediate results tensor and degree of parallelization.
ident: identity for binary combination function sf. sf(ident, x) needs to return
x.
f: function for reduction over a chunk. f needs to be of signature scalar_t
f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
sf: function to combine two partial results. sf needs to be of signature
scalar_t sf(scalar_t x, scalar_t y)
For example, you might have a tensor of 10000 entries and want to sum together
all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
an intermediate result tensor with 4 elements. Then it will execute the function
"f" you provide and pass the beginning and end index of these chunks, so
0-2499, 2500-4999, etc. and the combination identity. It will then write out
the result from each of these chunks into the intermediate result tensor. After
that it'll reduce the partial results from each chunk into a single number using
the combination function sf and the identity ident. For a total summation this
would be "+" and 0 respectively. This is similar to tbb's approach [1], where
you need to provide a function to accumulate a subrange, a function to combine
two partial results and an identity.
[1] https://software.intel.com/en-us/node/506154
*/
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const scalar_t ident,
const F f,
const SF sf) {
if (get_num_threads() == 1) {
return f(begin, end, ident);
} else {
const int64_t num_results = divup((end - begin), grain_size);
std::vector<scalar_t> results(num_results);
scalar_t* results_data = results.data();
#pragma omp parallel for if ((end - begin) >= grain_size)
for (int64_t id = 0; id < num_results; id++) {
int64_t i = begin + id * grain_size;
results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
}
return std::accumulate(
results_data, results_data + results.size(), ident, sf);
}
}
} // namespace at
|
integral_omp_sync.c | // Author: Fabio Rodrigues Pereira
// E-mail: fabior@uio.no
// compiling & running
// clang -Xpreprocessor -fopenmp integral_omp_sync.c -lomp
// ./a.out
// SPMD technique *video9
#include <stdlib.h> // rand, malloc, calloc and free.
#include <stdio.h> // printf
#include <math.h>
#include <time.h>
#include <omp.h>
#define NUM_THREADS 2
int main()
{
    /* Estimate pi by integrating 4/(1+x^2) over [0,1] with the midpoint
     * rule, using the SPMD pattern: each thread accumulates a private
     * partial sum over an interleaved subset of the steps, then folds it
     * into the shared total inside a critical section. */
    static long num_steps = 100000;
    double step;
    double pi = 0.0;

    step = 1.0 / (double)num_steps;
    omp_set_num_threads(NUM_THREADS);

#pragma omp parallel
    {
        int i, id, nthrds;
        double x, sum;

        /* Query the team actually granted; the runtime may give fewer
         * threads than requested. */
        id = omp_get_thread_num();
        nthrds = omp_get_num_threads();

        sum = 0.0;
        for (i = id; i < num_steps; i += nthrds) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }

        /* Serialize the read-modify-write of the shared accumulator. */
#pragma omp critical
        pi += sum * step;
    }

    printf("%f", pi);
    return 0;
}
convolution_1x1_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)4 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
float* g00 = g0.row(p / 8);
g00[0] = k00[0];
g00[1] = k10[0];
g00[2] = k20[0];
g00[3] = k30[0];
g00[4] = k40[0];
g00[5] = k50[0];
g00[6] = k60[0];
g00[7] = k70[0];
g00 += 8;
g00[0] = k01[0];
g00[1] = k11[0];
g00[2] = k21[0];
g00[3] = k31[0];
g00[4] = k41[0];
g00[5] = k51[0];
g00[6] = k61[0];
g00[7] = k71[0];
g00 += 8;
g00[0] = k02[0];
g00[1] = k12[0];
g00[2] = k22[0];
g00[3] = k32[0];
g00[4] = k42[0];
g00[5] = k52[0];
g00[6] = k62[0];
g00[7] = k72[0];
g00 += 8;
g00[0] = k03[0];
g00[1] = k13[0];
g00[2] = k23[0];
g00[3] = k33[0];
g00[4] = k43[0];
g00[5] = k53[0];
g00[6] = k63[0];
g00[7] = k73[0];
g00 += 8;
g00[0] = k04[0];
g00[1] = k14[0];
g00[2] = k24[0];
g00[3] = k34[0];
g00[4] = k44[0];
g00[5] = k54[0];
g00[6] = k64[0];
g00[7] = k74[0];
g00 += 8;
g00[0] = k05[0];
g00[1] = k15[0];
g00[2] = k25[0];
g00[3] = k35[0];
g00[4] = k45[0];
g00[5] = k55[0];
g00[6] = k65[0];
g00[7] = k75[0];
g00 += 8;
g00[0] = k06[0];
g00[1] = k16[0];
g00[2] = k26[0];
g00[3] = k36[0];
g00[4] = k46[0];
g00[5] = k56[0];
g00[6] = k66[0];
g00[7] = k76[0];
g00 += 8;
g00[0] = k07[0];
g00[1] = k17[0];
g00[2] = k27[0];
g00[3] = k37[0];
g00[4] = k47[0];
g00[5] = k57[0];
g00[6] = k67[0];
g00[7] = k77[0];
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
_mm256_comp_fmadd_ps4(_sum0, _w0, _w1, _w2, _w3, _val00, _val01, _val02, _val03);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
_mm256_comp_fmadd_ps4(_sum0, _w4, _w5, _w6, _w7, _val04, _val05, _val06, _val07);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
_mm256_comp_fmadd_ps4(_sum1, _w0, _w1, _w2, _w3, _val10, _val11, _val12, _val13);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_mm256_comp_fmadd_ps4(_sum1, _w4, _w5, _w6, _w7, _val14, _val15, _val16, _val17);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
_mm256_comp_fmadd_ps4(_sum2, _w0, _w1, _w2, _w3, _val20, _val21, _val22, _val23);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
_mm256_comp_fmadd_ps4(_sum2, _w4, _w5, _w6, _w7, _val24, _val25, _val26, _val27);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
_mm256_comp_fmadd_ps4(_sum3, _w0, _w1, _w2, _w3, _val30, _val31, _val32, _val33);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_mm256_comp_fmadd_ps4(_sum3, _w4, _w5, _w6, _w7, _val34, _val35, _val36, _val37);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
_mm256_comp_fmadd_ps4(_sum4, _w0, _w1, _w2, _w3, _val40, _val41, _val42, _val43);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
_mm256_comp_fmadd_ps4(_sum4, _w4, _w5, _w6, _w7, _val44, _val45, _val46, _val47);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
_mm256_comp_fmadd_ps4(_sum5, _w0, _w1, _w2, _w3, _val50, _val51, _val52, _val53);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_mm256_comp_fmadd_ps4(_sum5, _w4, _w5, _w6, _w7, _val54, _val55, _val56, _val57);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
_mm256_comp_fmadd_ps4(_sum6, _w0, _w1, _w2, _w3, _val60, _val61, _val62, _val63);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
_mm256_comp_fmadd_ps4(_sum6, _w4, _w5, _w6, _w7, _val64, _val65, _val66, _val67);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
_mm256_comp_fmadd_ps4(_sum7, _w0, _w1, _w2, _w3, _val70, _val71, _val72, _val73);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_mm256_comp_fmadd_ps4(_sum7, _w4, _w5, _w6, _w7, _val74, _val75, _val76, _val77);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
_mm256_comp_fmadd_ps4(_sum8, _w0, _w1, _w2, _w3, _val80, _val81, _val82, _val83);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
_mm256_comp_fmadd_ps4(_sum8, _w4, _w5, _w6, _w7, _val84, _val85, _val86, _val87);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
_mm256_comp_fmadd_ps4(_sum9, _w0, _w1, _w2, _w3, _val90, _val91, _val92, _val93);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_mm256_comp_fmadd_ps4(_sum9, _w4, _w5, _w6, _w7, _val94, _val95, _val96, _val97);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
_mm256_comp_fmadd_ps4(_sum10, _w0, _w1, _w2, _w3, _val100, _val101, _val102, _val103);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
_mm256_comp_fmadd_ps4(_sum10, _w4, _w5, _w6, _w7, _val104, _val105, _val106, _val107);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
_mm256_comp_fmadd_ps4(_sum11, _w0, _w1, _w2, _w3, _val110, _val111, _val112, _val113);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_mm256_comp_fmadd_ps4(_sum11, _w4, _w5, _w6, _w7, _val114, _val115, _val116, _val117);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum = _mm256_comp_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_comp_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_comp_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_comp_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_comp_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_comp_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_comp_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_comp_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
// 1x1 convolution with stride 2 on pack-8 data.
// A 1x1/s2 convolution is equivalent to subsampling the input (keeping every
// second pixel in both directions) and then applying the stride-1 sgemm
// kernel, so this routine only performs the shrink and delegates the math.
static void conv1x1s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const size_t elemsize = bottom_blob.elemsize;
    const int elempack = bottom_blob.elempack;
    const int outw = top_blob.w;
    const int outh = top_blob.h;

    // floats to skip after finishing an output row: the unread tail of the
    // current input row plus one entirely skipped input row, 8 floats/pixel
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* sptr = bottom_blob.channel(q);
        float* dptr = bottom_blob_shrinked.channel(q);

        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                // copy one pack8 pixel, then jump over the next one (stride 2)
                _mm256_storeu_ps(dptr, _mm256_loadu_ps(sptr));
                sptr += 16;
                dptr += 8;
            }
            sptr += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
fold.c | /** \file **/
/*
minimum free energy
RNA secondary structure prediction
c Ivo Hofacker, Christoph Flamm
original implementation by
Walter Fontana
g-quadruplex support and threadsafety
by Ronny Lorenz
Vienna RNA package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "params.h"
#include "loop_energies.h"
#include "data_structures.h"
#include "gquad.h"
#include "fold.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define PAREN
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define SAME_STRAND(I,J) (((I)>=cut_point)||((J)<cut_point))
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int logML = 0; /* if nonzero use logarithmic ML energy in energy_of_struct */
PUBLIC int uniq_ML = 0; /* do ML decomposition uniquely (for subopt) */
PUBLIC int cut_point = -1; /* set to first pos of second seq for cofolding */
PUBLIC int eos_debug = 0; /* verbose info from energy_of_struct */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE int *indx = NULL; /* index for moving in the triangle matrices c[] and fMl[]*/
PRIVATE int *c = NULL; /* energy array, given that i-j pair */
PRIVATE int *cc = NULL; /* linear array for calculating canonical structures */
PRIVATE int *cc1 = NULL; /* " " */
PRIVATE int *f5 = NULL; /* energy of 5' end */
PRIVATE int *f53 = NULL; /* energy of 5' end with 3' nucleotide not available for mismatches */
PRIVATE int *fML = NULL; /* multi-loop auxiliary energy array */
PRIVATE int *fM1 = NULL; /* second ML array, only for subopt */
PRIVATE int *fM2 = NULL; /* fM2 = multiloop region with exactly two stems, extending to 3' end */
PRIVATE int *Fmi = NULL; /* holds row i of fML (avoids jumps in memory) */
PRIVATE int *DMLi = NULL; /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */
PRIVATE int *DMLi1 = NULL; /* MIN(fML[i+1,k]+fML[k+1,j]) */
PRIVATE int *DMLi2 = NULL; /* MIN(fML[i+2,k]+fML[k+1,j]) */
PRIVATE int *DMLi_a = NULL; /* DMLi_a[j] holds min energy for at least two multiloop stems in [i,j], where j is available for dangling onto a surrounding stem */
PRIVATE int *DMLi_o = NULL; /* DMLi_o[j] holds min energy for at least two multiloop stems in [i,j], where j is unavailable for dangling onto a surrounding stem */
PRIVATE int *DMLi1_a = NULL;
PRIVATE int *DMLi1_o = NULL;
PRIVATE int *DMLi2_a = NULL;
PRIVATE int *DMLi2_o = NULL;
PRIVATE int Fc, FcH, FcI, FcM; /* parts of the exterior loop energies */
PRIVATE sect sector[MAXSECTORS]; /* stack of partial structures for backtracking */
PRIVATE char *ptype = NULL; /* precomputed array of pair types */
PRIVATE short *S = NULL, *S1 = NULL;
PRIVATE paramT *P = NULL;
PRIVATE int init_length = -1;
PRIVATE int *BP = NULL; /* contains the structure constraints: BP[i]
-1: | = base must be paired
-2: < = base must be paired with j<i
-3: > = base must be paired with j>i
-4: x = base must not pair
positive int: base is paired with int */
PRIVATE short *pair_table = NULL; /* needed by energy of struct */
PRIVATE bondT *base_pair2 = NULL; /* this replaces base_pair from fold_vars.c */
PRIVATE int circular = 0;
PRIVATE int struct_constrained = 0;
PRIVATE int with_gquad = 0;
PRIVATE int *ggg = NULL; /* minimum free energies of the gquadruplexes */
#ifdef _OPENMP
#pragma omp threadprivate(indx, c, cc, cc1, f5, f53, fML, fM1, fM2, Fmi,\
DMLi, DMLi1, DMLi2, DMLi_a, DMLi_o, DMLi1_a, DMLi1_o, DMLi2_a, DMLi2_o,\
Fc, FcH, FcI, FcM,\
sector, ptype, S, S1, P, init_length, BP, pair_table, base_pair2, circular, struct_constrained,\
ggg, with_gquad)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void get_arrays(unsigned int size);
PRIVATE int stack_energy(int i, const char *string, int verbostiy_level);
PRIVATE int energy_of_extLoop_pt(int i, short *pair_table);
PRIVATE int energy_of_ml_pt(int i, short *pt);
PRIVATE int ML_Energy(int i, int is_extloop);
PRIVATE void make_ptypes(const short *S, const char *structure, paramT *P);
PRIVATE void backtrack(const char *sequence, int s);
PRIVATE int fill_arrays(const char *sequence);
PRIVATE void fill_arrays_circ(const char *string, int *bt);
PRIVATE void init_fold(int length, paramT *parameters);
/* needed by cofold/eval */
PRIVATE int cut_in_loop(int i);
/* deprecated functions */
/*@unused@*/
int oldLoopEnergy(int i, int j, int p, int q, int type, int type_2);
int LoopEnergy(int n1, int n2, int type, int type_2, int si1, int sj1, int sp1, int sq1);
int HairpinE(int size, int type, int si1, int sj1, const char *string);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/* allocate memory for folding process */
/* Prepare all (thread-local) DP storage for folding a sequence of the given
   length and refresh the energy parameter set. */
PRIVATE void init_fold(int length, paramT *parameters){
#ifdef _OPENMP
  /* dynamic team sizing would invalidate the threadprivate storage layout */
  omp_set_dynamic(0);
#endif

  if (length < 1)
    nrerror("initialize_fold: argument must be greater 0");

  /* drop arrays from any previous (possibly shorter) run before reallocating */
  free_arrays();
  get_arrays((unsigned) length);

  init_length = length;
  indx = get_indx((unsigned) length);

  update_fold_params_par(parameters);
}
/*--------------------------------------------------------------------------*/
/* Allocate the DP matrices for a sequence of `size` nucleotides: the
   triangular arrays (c, fML, fM1, ptype) hold size*(size+1)/2 entries,
   the remaining helpers are single rows.  space() aborts on failure. */
PRIVATE void get_arrays(unsigned int size){
  unsigned int tri, lin1, lin2;

  /* the triangular index (size*(size+1))/2 must stay within int range */
  if(size >= (unsigned int)sqrt((double)INT_MAX))
    nrerror("get_arrays@fold.c: sequence length exceeds addressable range");

  tri  = (size*(size+1))/2+2;   /* triangular matrix plus safety slots */
  lin1 = size+1;
  lin2 = size+2;

  c     = (int *) space(sizeof(int)*tri);
  fML   = (int *) space(sizeof(int)*tri);
  if (uniq_ML)
    fM1 = (int *) space(sizeof(int)*tri);  /* only needed for subopt() */
  ptype = (char *)space(sizeof(char)*tri);

  f5  = (int *) space(sizeof(int)*lin2);
  f53 = (int *) space(sizeof(int)*lin2);
  cc  = (int *) space(sizeof(int)*lin2);
  cc1 = (int *) space(sizeof(int)*lin2);

  Fmi     = (int *) space(sizeof(int)*lin1);
  DMLi    = (int *) space(sizeof(int)*lin1);
  DMLi1   = (int *) space(sizeof(int)*lin1);
  DMLi2   = (int *) space(sizeof(int)*lin1);
  DMLi_a  = (int *) space(sizeof(int)*lin1);
  DMLi_o  = (int *) space(sizeof(int)*lin1);
  DMLi1_a = (int *) space(sizeof(int)*lin1);
  DMLi1_o = (int *) space(sizeof(int)*lin1);
  DMLi2_a = (int *) space(sizeof(int)*lin1);
  DMLi2_o = (int *) space(sizeof(int)*lin1);

  base_pair2 = (bondT *) space(sizeof(bondT)*(1+size/2));

  /* extra array for circfold() only */
  if(circular) fM2 = (int *) space(sizeof(int)*lin2);
}
/*--------------------------------------------------------------------------*/
/* Release every DP array allocated by get_arrays() and reset the module
   state.  free(NULL) is a guaranteed no-op per the C standard, so the
   per-pointer `if (p) free(p);` guards of the old code are redundant and
   have been dropped.  All pointers are re-NULLed afterwards, which makes
   the function idempotent (init_fold() calls it unconditionally). */
PUBLIC void free_arrays(void){
  free(indx);
  free(c);
  free(fML);
  free(f5);
  free(f53);
  free(cc);
  free(cc1);
  free(ptype);
  free(fM1);
  free(fM2);
  free(base_pair2);
  free(Fmi);
  free(DMLi);
  free(DMLi1);
  free(DMLi2);
  free(DMLi_a);
  free(DMLi_o);
  free(DMLi1_a);
  free(DMLi1_o);
  free(DMLi2_a);
  free(DMLi2_o);
  free(P);
  free(ggg);

  /* re-NULL everything so a later free_arrays()/get_arrays() cycle starts
     clean and double frees are impossible */
  indx = c = fML = f5 = f53 = cc = cc1 = fM1 = fM2 = Fmi = DMLi = DMLi1 = DMLi2 = ggg = NULL;
  DMLi_a = DMLi_o = DMLi1_a = DMLi1_o = DMLi2_a = DMLi2_o = NULL;
  ptype = NULL;
  base_pair = NULL;   /* legacy global from fold_vars.c; never freed here */
  base_pair2 = NULL;
  P = NULL;
  init_length = 0;
}
/*--------------------------------------------------------------------------*/
/* Hand out raw pointers to the module-private DP matrices so external
   routines (e.g. subopt()) can reuse the results of a previous fold() run.
   Ownership stays with this module: callers must not free these arrays,
   and the pointers are only valid until free_arrays() is called. */
PUBLIC void export_fold_arrays( int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **indx_p,
char **ptype_p){
/* make the DP arrays available to routines such as subopt() */
*f5_p = f5;       /* energies of the 5' fragment */
*c_p = c;         /* energies given that (i,j) pair */
*fML_p = fML;     /* multi-loop auxiliary array */
*fM1_p = fM1;     /* second ML array; NULL unless uniq_ML was set */
*indx_p = indx;   /* row index into the triangular matrices */
*ptype_p = ptype; /* precomputed pair types */
}
/* Same as export_fold_arrays(), but additionally hands out the energy
   parameter set P the arrays were filled with. */
PUBLIC void export_fold_arrays_par( int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **indx_p,
char **ptype_p,
paramT **P_p){
export_fold_arrays(f5_p, c_p, fML_p, fM1_p, indx_p,ptype_p);
*P_p = P;
}
/* Expose the DP arrays plus the circular-folding results: Fc is the total
   exterior-loop energy, FcH/FcI/FcM its hairpin/interior/multi-loop parts,
   and fM2 the two-stem multiloop array (allocated only when circular).
   Pointers remain owned by this module and valid until free_arrays(). */
PUBLIC void export_circfold_arrays( int *Fc_p,
int *FcH_p,
int *FcI_p,
int *FcM_p,
int **fM2_p,
int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **indx_p,
char **ptype_p){
/* make the DP arrays available to routines such as subopt() */
*f5_p = f5;
*c_p = c;
*fML_p = fML;
*fM1_p = fM1;
*fM2_p = fM2;   /* multiloop region with exactly two stems */
*Fc_p = Fc;     /* the following four are copied by value */
*FcH_p = FcH;
*FcI_p = FcI;
*FcM_p = FcM;
*indx_p = indx;
*ptype_p = ptype;
}
/* Like export_circfold_arrays(), but additionally exposes the active
 * energy parameter set P. */
PUBLIC void export_circfold_arrays_par( int *Fc_p,
int *FcH_p,
int *FcI_p,
int *FcM_p,
int **fM2_p,
int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **indx_p,
char **ptype_p,
paramT **P_p){
*P_p = P;
export_circfold_arrays(Fc_p, FcH_p, FcI_p, FcM_p, fM2_p, f5_p, c_p, fML_p, fM1_p, indx_p, ptype_p);
}
/*--------------------------------------------------------------------------*/
/* MFE folding of a linear RNA sequence. Writes the predicted structure
 * into 'structure' and returns the free energy; thin wrapper around
 * fold_par() using the global constraint setting. */
PUBLIC float fold(const char *string, char *structure){
const int circ = 0; /* linear molecule */
return fold_par(string, structure, NULL, fold_constrained, circ);
}
/* MFE folding of a circular RNA sequence; thin wrapper around
 * fold_par() with the circular flag set. */
PUBLIC float circfold(const char *string, char *structure){
const int circ = 1; /* circular molecule */
return fold_par(string, structure, NULL, fold_constrained, circ);
}
/**
 * Compute MFE and structure of an RNA sequence.
 *
 * string        the RNA sequence
 * structure     in: optional constraint string, out: predicted structure
 * parameters    energy parameter set (NULL -> use/refresh global defaults)
 * is_constrained whether 'structure' carries folding constraints
 * is_circular   treat the sequence as a circular molecule
 *
 * Returns the free energy in kcal/mol. Uses and mutates the file-global
 * state (S, S1, BP, base_pair2, DP arrays, ...).
 */
PUBLIC float fold_par(const char *string,
char *structure,
paramT *parameters,
int is_constrained,
int is_circular){
int i, length, energy, bonus, bonus_cnt, s;
bonus = 0;
bonus_cnt = 0;
s = 0;
circular = is_constrained ? circular : circular; /* NOTE(review): no-op placeholder removed below */
circular = is_circular;
struct_constrained = is_constrained;
length = (int) strlen(string);
#ifdef _OPENMP
/* with OpenMP every call (re)initializes its thread-local arrays */
init_fold(length, parameters);
#else
if (parameters) init_fold(length, parameters);
else if (length>init_length) init_fold(length, parameters);
else if (fabs(P->temperature - temperature)>1e-6) update_fold_params();
#endif
with_gquad = P->model_details.gquad;
S = encode_sequence(string, 0);
S1 = encode_sequence(string, 1);
BP = (int *)space(sizeof(int)*(length+2));
if(with_gquad){ /* add a guess of how many G's may be involved in a G quadruplex */
if(base_pair2)
free(base_pair2);
base_pair2 = (bondT *) space(sizeof(bondT)*(4*(1+length/2)));
}
make_ptypes(S, structure, P);
/* fill the DP matrices; for circular RNAs the exterior contribution is
 * recomputed by fill_arrays_circ() and stored in the global Fc */
energy = fill_arrays(string);
if(circular){
fill_arrays_circ(string, &s);
energy = Fc;
}
backtrack(string, s);
#ifdef PAREN
parenthesis_structure(structure, base_pair2, length);
#else
letter_structure(structure, base_pair2, length);
#endif
/*
* Backward compatibility:
* This block may be removed if deprecated functions
* relying on the global variable "base_pair" vanishs from within the package!
*/
base_pair = base_pair2;
/*
{
if(base_pair) free(base_pair);
base_pair = (bondT *)space(sizeof(bondT) * (1+length/2));
memcpy(base_pair, base_pair2, sizeof(bondT) * (1+length/2));
}
*/
/* check constraints */
for(i=1;i<=length;i++) {
if((BP[i]<0)&&(BP[i]>-4)) {
bonus_cnt++;
if((BP[i]==-3)&&(structure[i-1]==')')) bonus++;
if((BP[i]==-2)&&(structure[i-1]=='(')) bonus++;
if((BP[i]==-1)&&(structure[i-1]!='.')) bonus++;
}
if(BP[i]>i) {
int l;
bonus_cnt++;
for(l=1; l<=base_pair2[0].i; l++)
if(base_pair2[l].i != base_pair2[l].j)
if((i==base_pair2[l].i)&&(BP[i]==base_pair2[l].j)) bonus++;
}
}
if (bonus_cnt>bonus) fprintf(stderr,"\ncould not enforce all constraints\n");
bonus*=BONUS;
free(S); free(S1); free(BP);
energy += bonus; /*remove bonus energies from result */
if (backtrack_type=='C')
return (float) c[indx[length]+1]/100.;
else if (backtrack_type=='M')
return (float) fML[indx[length]+1]/100.;
else
return (float) energy/100.;
}
/**
*** fill "c", "fML" and "f5" arrays and return optimal energy
**/
/* Forward recursion of the Zuker MFE algorithm: fills the pair matrix
 * "c", the multiloop matrix "fML" (and optionally "fM1"), then the
 * exterior-loop array "f5". Returns f5[length], the optimal energy of
 * the whole sequence (in dekacal/mol). Relies on file-global state
 * (S1, ptype, indx, BP, P, DP arrays, with_gquad, ...). */
PRIVATE int fill_arrays(const char *string) {
int i, j, k, length, energy, en, mm5, mm3;
int decomp, new_fML, max_separation;
int no_close, type, type_2, tt;
int bonus=0;
int dangle_model, noGUclosure, with_gquads; /* NOTE(review): with_gquads is never used below; code reads the global with_gquad */
dangle_model = P->model_details.dangles;
noGUclosure = P->model_details.noGUclosure;
length = (int) strlen(string);
max_separation = (int) ((1.-LOCALITY)*(double)(length-2)); /* not in use */
if(with_gquad)
ggg = get_gquad_matrix(S, P);
/* initialize the auxiliary per-row arrays */
for (j=1; j<=length; j++) {
Fmi[j]=DMLi[j]=DMLi1[j]=DMLi2[j]=INF;
}
for (j = 1; j<=length; j++)
for (i=(j>TURN?(j-TURN):1); i<j; i++) {
c[indx[j]+i] = fML[indx[j]+i] = INF;
if (uniq_ML) fM1[indx[j]+i] = INF;
}
if (length <= TURN) return 0; /* too short to form any pair */
for (i = length-TURN-1; i >= 1; i--) { /* i,j in [1..length] */
for (j = i+TURN+1; j <= length; j++) {
int p, q, ij, jj, ee;
int minq, maxq, l1, up, c0, c1, c2, c3;
int MLenergy;
ij = indx[j]+i;
bonus = 0;
type = ptype[ij];
energy = INF;
/* enforcing structure constraints */
if ((BP[i]==j)||(BP[i]==-1)||(BP[i]==-2)) bonus -= BONUS;
if ((BP[j]==-1)||(BP[j]==-3)) bonus -= BONUS;
if ((BP[i]==-4)||(BP[j]==-4)) type=0;
no_close = (((type==3)||(type==4))&&noGUclosure&&(bonus==0));
if (j-i-1 > max_separation) type = 0; /* forces locality degree */
if (type) { /* we have a pair */
int new_c=0, stackEnergy=INF;
/* hairpin ----------------------------------------------*/
new_c = (no_close) ? FORBIDDEN : E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], string+i-1, P);
/*--------------------------------------------------------
check for elementary structures involving more than one
closing pair.
--------------------------------------------------------*/
for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1) ; p++) {
minq = j-i+p-MAXLOOP-2;
if (minq<p+1+TURN) minq = p+1+TURN;
for (q = minq; q < j; q++) {
type_2 = ptype[indx[q]+p];
if (type_2==0) continue;
type_2 = rtype[type_2];
if (noGUclosure)
if (no_close||(type_2==3)||(type_2==4))
if ((p>i+1)||(q<j-1)) continue; /* continue unless stack */
energy = E_IntLoop(p-i-1, j-q-1, type, type_2,
S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
ee = energy+c[indx[q]+p];
new_c = MIN2(new_c, ee);
if ((p==i+1)&&(j==q+1)) stackEnergy = energy; /* remember stack energy */
} /* end q-loop */
} /* end p-loop */
/* multi-loop decomposition ------------------------*/
if (!no_close) {
decomp = DMLi1[j-1];
tt = rtype[type];
switch(dangle_model){
/* no dangles */
case 0: decomp += E_MLstem(tt, -1, -1, P);
break;
/* double dangles */
case 2: decomp += E_MLstem(tt, S1[j-1], S1[i+1], P);
break;
/* normal dangles, aka dangles = 1 || 3 */
default: decomp += E_MLstem(tt, -1, -1, P);
decomp = MIN2(decomp, DMLi2[j-1] + E_MLstem(tt, -1, S1[i+1], P) + P->MLbase);
decomp = MIN2(decomp, DMLi2[j-2] + E_MLstem(tt, S1[j-1], S1[i+1], P) + 2*P->MLbase);
decomp = MIN2(decomp, DMLi1[j-2] + E_MLstem(tt, S1[j-1], -1, P) + P->MLbase);
break;
}
MLenergy = decomp + P->MLclosing;
new_c = MIN2(new_c, MLenergy);
}
/* coaxial stacking of (i.j) with (i+1.k) or (k+1.j-1) */
if (dangle_model==3) {
decomp = INF;
for (k = i+2+TURN; k < j-2-TURN; k++) {
type_2 = rtype[ptype[indx[k]+i+1]];
if (type_2)
decomp = MIN2(decomp, c[indx[k]+i+1]+P->stack[type][type_2]+fML[indx[j-1]+k+1]);
type_2 = rtype[ptype[indx[j-1]+k+1]];
if (type_2)
decomp = MIN2(decomp, c[indx[j-1]+k+1]+P->stack[type][type_2]+fML[indx[k]+i+1]);
}
/* no TermAU penalty if coax stack */
decomp += 2*P->MLintern[1] + P->MLclosing;
new_c = MIN2(new_c, decomp);
}
if(with_gquad){
/* include all cases where a g-quadruplex may be enclosed by base pair (i,j) */
if (!no_close) {
tt = rtype[type];
energy = E_GQuad_IntLoop(i, j, type, S1, ggg, indx, P);
new_c = MIN2(new_c, energy);
}
}
/* cc1[j-1] holds c(i+1,j-1) of the previous row, so cc1[j-1]+stackEnergy
 * is the stacked-pair continuation of (i,j) */
new_c = MIN2(new_c, cc1[j-1]+stackEnergy);
cc[j] = new_c + bonus;
if (noLonelyPairs)
c[ij] = cc1[j-1]+stackEnergy+bonus;
else
c[ij] = cc[j];
} /* end >> if (pair) << */
else c[ij] = INF;
/* done with c[i,j], now compute fML[i,j] and fM1[i,j] */
/* (i,j) + MLstem ? */
new_fML = INF;
if(type){
new_fML = c[ij];
switch(dangle_model){
case 2: new_fML += E_MLstem(type, (i==1) ? S1[length] : S1[i-1], S1[j+1], P);
break;
default: new_fML += E_MLstem(type, -1, -1, P);
break;
}
}
if(with_gquad){
new_fML = MIN2(new_fML, ggg[indx[j] + i] + E_MLstem(0, -1, -1, P));
}
if (uniq_ML){
fM1[ij] = MIN2(fM1[indx[j-1]+i] + P->MLbase, new_fML);
}
/* free ends ? -----------------------------------------*/
/* we must not just extend 3'/5' end by unpaired nucleotides if
* dangle_model == 1, this could lead to d5+d3 contributions were
* mismatch must be taken!
*/
switch(dangle_model){
/* no dangles */
case 0: new_fML = MIN2(new_fML, fML[ij+1]+P->MLbase);
new_fML = MIN2(fML[indx[j-1]+i]+P->MLbase, new_fML);
break;
/* double dangles */
case 2: new_fML = MIN2(new_fML, fML[ij+1]+P->MLbase);
new_fML = MIN2(fML[indx[j-1]+i]+P->MLbase, new_fML);
break;
/* normal dangles, aka dangle_model = 1 || 3 */
default: mm5 = ((i>1) || circular) ? S1[i] : -1;
mm3 = ((j<length) || circular) ? S1[j] : -1;
new_fML = MIN2(new_fML, fML[ij+1] + P->MLbase);
new_fML = MIN2(new_fML, fML[indx[j-1]+i] + P->MLbase);
tt = ptype[ij+1];
if(tt) new_fML = MIN2(new_fML, c[ij+1] + E_MLstem(tt, mm5, -1, P) + P->MLbase);
tt = ptype[indx[j-1]+i];
if(tt) new_fML = MIN2(new_fML, c[indx[j-1]+i] + E_MLstem(tt, -1, mm3, P) + P->MLbase);
tt = ptype[indx[j-1]+i+1];
if(tt) new_fML = MIN2(new_fML, c[indx[j-1]+i+1] + E_MLstem(tt, mm5, mm3, P) + 2*P->MLbase);
break;
}
/* modular decomposition -------------------------------*/
for (decomp = INF, k = i + 1 + TURN; k <= j - 2 - TURN; k++)
decomp = MIN2(decomp, Fmi[k]+fML[indx[j]+k+1]);
DMLi[j] = decomp; /* store for use in ML decompositon */
new_fML = MIN2(new_fML,decomp);
/* coaxial stacking */
if (dangle_model==3) {
/* additional ML decomposition as two coaxially stacked helices */
for (decomp = INF, k = i+1+TURN; k <= j-2-TURN; k++) {
type = ptype[indx[k]+i]; type = rtype[type];
type_2 = ptype[indx[j]+k+1]; type_2 = rtype[type_2];
if (type && type_2)
decomp = MIN2(decomp,
c[indx[k]+i]+c[indx[j]+k+1]+P->stack[type][type_2]);
}
decomp += 2*P->MLintern[1]; /* no TermAU penalty if coax stack */
#if 0
/* This is needed for Y shaped ML loops with coax stacking of
interior pairts, but backtracking will fail if activated */
DMLi[j] = MIN2(DMLi[j], decomp);
DMLi[j] = MIN2(DMLi[j], DMLi[j-1]+P->MLbase);
DMLi[j] = MIN2(DMLi[j], DMLi1[j]+P->MLbase);
new_fML = MIN2(new_fML, DMLi[j]);
#endif
new_fML = MIN2(new_fML, decomp);
}
fML[ij] = Fmi[j] = new_fML; /* substring energy */
}
{
int *FF; /* rotate the auxilliary arrays */
FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF;
FF = cc1; cc1=cc; cc=FF;
for (j=1; j<=length; j++) {cc[j]=Fmi[j]=DMLi[j]=INF; }
}
}
/* calculate energies of 5' and 3' fragments */
f5[TURN+1]= 0;
/* duplicated code may be faster than conditions inside loop ;) */
switch(dangle_model){
/* dont use dangling end and mismatch contributions at all */
case 0: for(j=TURN+2; j<=length; j++){
f5[j] = f5[j-1];
for (i=j-TURN-1; i>1; i--){
if(with_gquad){
f5[j] = MIN2(f5[j], f5[i-1] + ggg[indx[j]+i]);
}
type = ptype[indx[j]+i];
if(!type) continue;
en = c[indx[j]+i];
f5[j] = MIN2(f5[j], f5[i-1] + en + E_ExtLoop(type, -1, -1, P));
}
if(with_gquad){
f5[j] = MIN2(f5[j], ggg[indx[j]+1]);
}
type=ptype[indx[j]+1];
if(!type) continue;
en = c[indx[j]+1];
f5[j] = MIN2(f5[j], en + E_ExtLoop(type, -1, -1, P));
}
break;
/* always use dangles on both sides */
/* j only runs to length-1 here; j==length needs S1[j+1]-free handling
 * and is computed explicitly after the loop */
case 2: for(j=TURN+2; j<length; j++){
f5[j] = f5[j-1];
for (i=j-TURN-1; i>1; i--){
if(with_gquad){
f5[j] = MIN2(f5[j], f5[i-1] + ggg[indx[j]+i]);
}
type = ptype[indx[j]+i];
if(!type) continue;
en = c[indx[j]+i];
f5[j] = MIN2(f5[j], f5[i-1] + en + E_ExtLoop(type, S1[i-1], S1[j+1], P));
}
if(with_gquad){
f5[j] = MIN2(f5[j], ggg[indx[j]+1]);
}
type=ptype[indx[j]+1];
if(!type) continue;
en = c[indx[j]+1];
f5[j] = MIN2(f5[j], en + E_ExtLoop(type, -1, S1[j+1], P));
}
f5[length] = f5[length-1];
for (i=length-TURN-1; i>1; i--){
if(with_gquad){
f5[length] = MIN2(f5[length], f5[i-1] + ggg[indx[length]+i]);
}
type = ptype[indx[length]+i];
if(!type) continue;
en = c[indx[length]+i];
f5[length] = MIN2(f5[length], f5[i-1] + en + E_ExtLoop(type, S1[i-1], -1, P));
}
if(with_gquad){
f5[length] = MIN2(f5[length], ggg[indx[length]+1]);
}
type=ptype[indx[length]+1];
if(!type) break;
en = c[indx[length]+1];
f5[length] = MIN2(f5[length], en + E_ExtLoop(type, -1, -1, P));
break;
/* normal dangles, aka dangle_model = 1 || 3 */
default: for(j=TURN+2; j<=length; j++){
f5[j] = f5[j-1];
for (i=j-TURN-1; i>1; i--){
if(with_gquad){
f5[j] = MIN2(f5[j], f5[i-1] + ggg[indx[j]+i]);
}
type = ptype[indx[j]+i];
if(type){
en = c[indx[j]+i];
f5[j] = MIN2(f5[j], f5[i-1] + en + E_ExtLoop(type, -1, -1, P));
f5[j] = MIN2(f5[j], f5[i-2] + en + E_ExtLoop(type, S1[i-1], -1, P));
}
type = ptype[indx[j-1]+i];
if(type){
en = c[indx[j-1]+i];
f5[j] = MIN2(f5[j], f5[i-1] + en + E_ExtLoop(type, -1, S1[j], P));
f5[j] = MIN2(f5[j], f5[i-2] + en + E_ExtLoop(type, S1[i-1], S1[j], P));
}
}
if(with_gquad){
f5[j] = MIN2(f5[j], ggg[indx[j]+1]);
}
type = ptype[indx[j]+1];
if(type) f5[j] = MIN2(f5[j], c[indx[j]+1] + E_ExtLoop(type, -1, -1, P));
type = ptype[indx[j-1]+1];
if(type) f5[j] = MIN2(f5[j], c[indx[j-1]+1] + E_ExtLoop(type, -1, S1[j], P));
}
}
return f5[length];
}
#include "circfold.inc"
/**
*** trace back through the "c", "f5" and "fML" arrays to get the
*** base pairing list. No search for equivalent structures is done.
*** This is fast, since only few structure elements are recalculated.
***
*** normally s=0.
*** If s>0 then s items have been already pushed onto the sector stack
**/
/* Trace back through c/f5/fML (and ggg for g-quadruplexes) to recover the
 * base pair list into base_pair2. Uses an explicit stack ("sector") of
 * intervals; sector[].ml selects the array to backtrack in: 0 = f5,
 * 1 = fML, 2 = c (pair is forced). If s>0, s entries were pre-pushed by
 * the caller (e.g. fill_arrays_circ). */
PRIVATE void backtrack(const char *string, int s) {
int i, j, ij, k, l1, mm5, mm3, length, energy, en, new;
int no_close, type, type_2, tt, minq, maxq, c0, c1, c2, c3;
int bonus;
int b=0; /* number of pairs written to base_pair2 so far */
int dangle_model = P->model_details.dangles;
length = strlen(string);
if (s==0) {
sector[++s].i = 1;
sector[s].j = length;
sector[s].ml = (backtrack_type=='M') ? 1 : ((backtrack_type=='C')? 2: 0);
}
while (s>0) {
int ml, fij, fi, cij, traced, i1, j1, p, q, jj=0, gq=0;
int canonical = 1; /* (i,j) closes a canonical structure */
i = sector[s].i;
j = sector[s].j;
ml = sector[s--].ml; /* ml is a flag indicating if backtracking is to
occur in the fML- (1) or in the f-array (0) */
if (ml==2) {
base_pair2[++b].i = i;
base_pair2[b].j = j;
goto repeat1;
}
else if(ml==7) { /* indicates that i,j are enclosing a gquadruplex */
/* actually, do something here */
}
if (j < i+TURN+1) continue; /* no more pairs in this interval */
fij = (ml == 1)? fML[indx[j]+i] : f5[j];
fi = (ml == 1)?(fML[indx[j-1]+i]+P->MLbase): f5[j-1];
if (fij == fi) { /* 3' end is unpaired */
sector[++s].i = i;
sector[s].j = j-1;
sector[s].ml = ml;
continue;
}
if (ml == 0) { /* backtrack in f5 */
switch(dangle_model){
case 0: /* j is paired. Find pairing partner */
for(k=j-TURN-1,traced=0; k>=1; k--){
if(with_gquad){
if(fij == f5[k-1] + ggg[indx[j]+k]){
/* found the decomposition */
traced = j; jj = k - 1; gq = 1;
break;
}
}
type = ptype[indx[j]+k];
if(type)
if(fij == E_ExtLoop(type, -1, -1, P) + c[indx[j]+k] + f5[k-1]){
traced=j; jj = k-1;
break;
}
}
break;
case 2: mm3 = (j<length) ? S1[j+1] : -1;
for(k=j-TURN-1,traced=0; k>=1; k--){
if(with_gquad){
if(fij == f5[k-1] + ggg[indx[j]+k]){
/* found the decomposition */
traced = j; jj = k - 1; gq = 1;
break;
}
}
type = ptype[indx[j]+k];
if(type)
if(fij == E_ExtLoop(type, (k>1) ? S1[k-1] : -1, mm3, P) + c[indx[j]+k] + f5[k-1]){
traced=j; jj = k-1;
break;
}
}
break;
default: for(traced = 0, k=j-TURN-1; k>1; k--){
if(with_gquad){
if(fij == f5[k-1] + ggg[indx[j]+k]){
/* found the decomposition */
traced = j; jj = k - 1; gq = 1;
break;
}
}
type = ptype[indx[j] + k];
if(type){
en = c[indx[j] + k];
if(fij == f5[k-1] + en + E_ExtLoop(type, -1, -1, P)){
traced = j;
jj = k-1;
break;
}
if(fij == f5[k-2] + en + E_ExtLoop(type, S1[k-1], -1, P)){
traced = j;
jj = k-2;
break;
}
}
type = ptype[indx[j-1] + k];
if(type){
en = c[indx[j-1] + k];
if(fij == f5[k-1] + en + E_ExtLoop(type, -1, S1[j], P)){
traced = j-1;
jj = k-1;
break;
}
if(fij == f5[k-2] + en + E_ExtLoop(type, S1[k-1], S1[j], P)){
traced = j-1;
jj = k-2;
break;
}
}
}
if(!traced){
/* loop above did not find a split point k>1; the pair must start at 1 */
if(with_gquad){
if(fij == ggg[indx[j]+1]){
/* found the decomposition */
traced = j; jj = 0; gq = 1;
break;
}
}
type = ptype[indx[j]+1];
if(type){
if(fij == c[indx[j]+1] + E_ExtLoop(type, -1, -1, P)){
traced = j;
jj = 0;
break;
}
}
type = ptype[indx[j-1]+1];
if(type){
if(fij == c[indx[j-1]+1] + E_ExtLoop(type, -1, S1[j], P)){
traced = j-1;
jj = 0;
break;
}
}
}
break;
}
if (!traced){
fprintf(stderr, "%s\n", string);
nrerror("backtrack failed in f5");
}
/* push back the remaining f5 portion */
sector[++s].i = 1;
sector[s].j = jj;
sector[s].ml = ml;
/* trace back the base pair found */
i=k; j=traced;
if(with_gquad && gq){
/* goto backtrace of gquadruplex */
goto repeat_gquad;
}
base_pair2[++b].i = i;
base_pair2[b].j = j;
goto repeat1;
}
else { /* trace back in fML array */
if (fML[indx[j]+i+1]+P->MLbase == fij) { /* 5' end is unpaired */
sector[++s].i = i+1;
sector[s].j = j;
sector[s].ml = ml;
continue;
}
ij = indx[j]+i;
if(with_gquad){
if(fij == ggg[ij] + E_MLstem(0, -1, -1, P)){
/* go to backtracing of quadruplex */
goto repeat_gquad;
}
}
tt = ptype[ij];
en = c[ij];
switch(dangle_model){
case 0: if(fij == en + E_MLstem(tt, -1, -1, P)){
base_pair2[++b].i = i;
base_pair2[b].j = j;
goto repeat1;
}
break;
case 2: if(fij == en + E_MLstem(tt, S1[i-1], S1[j+1], P)){
base_pair2[++b].i = i;
base_pair2[b].j = j;
goto repeat1;
}
break;
default: if(fij == en + E_MLstem(tt, -1, -1, P)){
base_pair2[++b].i = i;
base_pair2[b].j = j;
goto repeat1;
}
tt = ptype[ij+1];
if(fij == c[ij+1] + E_MLstem(tt, S1[i], -1, P) + P->MLbase){
base_pair2[++b].i = ++i;
base_pair2[b].j = j;
goto repeat1;
}
tt = ptype[indx[j-1]+i];
if(fij == c[indx[j-1]+i] + E_MLstem(tt, -1, S1[j], P) + P->MLbase){
base_pair2[++b].i = i;
base_pair2[b].j = --j;
goto repeat1;
}
tt = ptype[indx[j-1]+i+1];
if(fij == c[indx[j-1]+i+1] + E_MLstem(tt, S1[i], S1[j], P) + 2*P->MLbase){
base_pair2[++b].i = ++i;
base_pair2[b].j = --j;
goto repeat1;
}
break;
}
/* find the split point k of the modular fML decomposition */
for(k = i + 1 + TURN; k <= j - 2 - TURN; k++)
if(fij == (fML[indx[k]+i]+fML[indx[j]+k+1]))
break;
if ((dangle_model==3)&&(k > j - 2 - TURN)) { /* must be coax stack */
ml = 2;
for (k = i+1+TURN; k <= j - 2 - TURN; k++) {
type = rtype[ptype[indx[k]+i]];
type_2 = rtype[ptype[indx[j]+k+1]];
if (type && type_2)
if (fij == c[indx[k]+i]+c[indx[j]+k+1]+P->stack[type][type_2]+
2*P->MLintern[1])
break;
}
}
sector[++s].i = i;
sector[s].j = k;
sector[s].ml = ml;
sector[++s].i = k+1;
sector[s].j = j;
sector[s].ml = ml;
if (k>j-2-TURN) nrerror("backtrack failed in fML");
continue;
}
repeat1:
/*----- begin of "repeat:" -----*/
/* (i,j) is known to pair; figure out which loop it closes */
ij = indx[j]+i;
if (canonical) cij = c[ij];
type = ptype[ij];
bonus = 0;
if (struct_constrained) {
if ((BP[i]==j)||(BP[i]==-1)||(BP[i]==-2)) bonus -= BONUS;
if ((BP[j]==-1)||(BP[j]==-3)) bonus -= BONUS;
}
if (noLonelyPairs)
if (cij == c[ij]){
/* (i.j) closes canonical structures, thus
(i+1.j-1) must be a pair */
type_2 = ptype[indx[j-1]+i+1]; type_2 = rtype[type_2];
cij -= P->stack[type][type_2] + bonus;
base_pair2[++b].i = i+1;
base_pair2[b].j = j-1;
i++; j--;
canonical=0;
goto repeat1;
}
canonical = 1;
no_close = (((type==3)||(type==4))&&no_closingGU&&(bonus==0));
if (no_close) {
if (cij == FORBIDDEN) continue;
} else
if (cij == E_Hairpin(j-i-1, type, S1[i+1], S1[j-1],string+i-1, P)+bonus)
continue;
/* check for interior loop (p,q) enclosed by (i,j) */
for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1); p++) {
minq = j-i+p-MAXLOOP-2;
if (minq<p+1+TURN) minq = p+1+TURN;
for (q = j-1; q >= minq; q--) {
type_2 = ptype[indx[q]+p];
if (type_2==0) continue;
type_2 = rtype[type_2];
if (no_closingGU)
if (no_close||(type_2==3)||(type_2==4))
if ((p>i+1)||(q<j-1)) continue; /* continue unless stack */
/* energy = oldLoopEnergy(i, j, p, q, type, type_2); */
energy = E_IntLoop(p-i-1, j-q-1, type, type_2,
S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
new = energy+c[indx[q]+p]+bonus;
traced = (cij == new);
if (traced) {
base_pair2[++b].i = p;
base_pair2[b].j = q;
i = p, j = q;
goto repeat1;
}
}
}
/* end of repeat: --------------------------------------------------*/
/* (i.j) must close a multi-loop */
tt = rtype[type];
i1 = i+1; j1 = j-1;
if(with_gquad){
/*
The case that is handled here actually resembles something like
an interior loop where the enclosing base pair is of regular
kind and the enclosed pair is not a canonical one but a g-quadruplex
that should then be decomposed further...
*/
if(backtrack_GQuad_IntLoop(cij - bonus, i, j, type, S, ggg, indx, &p, &q, P)){
i = p; j = q;
goto repeat_gquad;
}
}
sector[s+1].ml = sector[s+2].ml = 1;
switch(dangle_model){
case 0: en = cij - E_MLstem(tt, -1, -1, P) - P->MLclosing - bonus;
for(k = i+2+TURN; k < j-2-TURN; k++){
if(en == fML[indx[k]+i+1] + fML[indx[j-1]+k+1])
break;
}
break;
case 2: en = cij - E_MLstem(tt, S1[j-1], S1[i+1], P) - P->MLclosing - bonus;
for(k = i+2+TURN; k < j-2-TURN; k++){
if(en == fML[indx[k]+i+1] + fML[indx[j-1]+k+1])
break;
}
break;
default: for(k = i+2+TURN; k < j-2-TURN; k++){
en = cij - P->MLclosing - bonus;
if(en == fML[indx[k]+i+1] + fML[indx[j-1]+k+1] + E_MLstem(tt, -1, -1, P)){
break;
}
else if(en == fML[indx[k]+i+2] + fML[indx[j-1]+k+1] + E_MLstem(tt, -1, S1[i+1], P) + P->MLbase){
i1 = i+2;
break;
}
else if(en == fML[indx[k]+i+1] + fML[indx[j-2]+k+1] + E_MLstem(tt, S1[j-1], -1, P) + P->MLbase){
j1 = j-2;
break;
}
else if(en == fML[indx[k]+i+2] + fML[indx[j-2]+k+1] + E_MLstem(tt, S1[j-1], S1[i+1], P) + 2*P->MLbase){
i1 = i+2;
j1 = j-2;
break;
}
/* coaxial stacking of (i.j) with (i+1.k) or (k.j-1) */
/* use MLintern[1] since coax stacked pairs don't get TerminalAU */
if(dangle_model == 3){
type_2 = rtype[ptype[indx[k]+i+1]];
if (type_2) {
en = c[indx[k]+i+1]+P->stack[type][type_2]+fML[indx[j-1]+k+1];
if (cij == en+2*P->MLintern[1]+P->MLclosing) {
ml = 2;
sector[s+1].ml = 2;
traced = 1;
break;
}
}
type_2 = rtype[ptype[indx[j-1]+k+1]];
if (type_2) {
en = c[indx[j-1]+k+1]+P->stack[type][type_2]+fML[indx[k]+i+1];
if (cij == en+2*P->MLintern[1]+P->MLclosing) {
sector[s+2].ml = 2;
traced = 1;
break;
}
}
}
}
break;
}
if (k<=j-3-TURN) { /* found the decomposition */
sector[++s].i = i1;
sector[s].j = k;
sector[++s].i = k+1;
sector[s].j = j1;
} else {
#if 0
/* Y shaped ML loops fon't work yet */
if (dangle_model==3) {
d5 = P->dangle5[tt][S1[j-1]];
d3 = P->dangle3[tt][S1[i+1]];
/* (i,j) must close a Y shaped ML loop with coax stacking */
if (cij == fML[indx[j-2]+i+2] + mm + d3 + d5 + P->MLbase + P->MLbase) {
i1 = i+2;
j1 = j-2;
} else if (cij == fML[indx[j-2]+i+1] + mm + d5 + P->MLbase)
j1 = j-2;
else if (cij == fML[indx[j-1]+i+2] + mm + d3 + P->MLbase)
i1 = i+2;
else /* last chance */
if (cij != fML[indx[j-1]+i+1] + mm + P->MLbase)
fprintf(stderr, "backtracking failed in repeat");
/* if we arrive here we can express cij via fML[i1,j1]+dangles */
sector[++s].i = i1;
sector[s].j = j1;
}
else
#endif
nrerror("backtracking failed in repeat");
}
continue; /* this is a workarround to not accidentally proceed in the following block */
repeat_gquad:
/*
now we do some fancy stuff to backtrace the stacksize and linker lengths
of the g-quadruplex that should reside within position i,j
*/
{
int l[3], L, a;
L = -1;
get_gquad_pattern_mfe(S, i, j, P, &L, l);
if(L != -1){
/* fill the G's of the quadruplex into base_pair2 */
/* a gquad position is encoded as a "pair" with .i == .j */
for(a=0;a<L;a++){
base_pair2[++b].i = i+a;
base_pair2[b].j = i+a;
base_pair2[++b].i = i+L+l[0]+a;
base_pair2[b].j = i+L+l[0]+a;
base_pair2[++b].i = i+L+l[0]+L+l[1]+a;
base_pair2[b].j = i+L+l[0]+L+l[1]+a;
base_pair2[++b].i = i+L+l[0]+L+l[1]+L+l[2]+a;
base_pair2[b].j = i+L+l[0]+L+l[1]+L+l[2]+a;
}
goto repeat_gquad_exit;
}
nrerror("backtracking failed in repeat_gquad");
}
repeat_gquad_exit:
asm("nop"); /* labels must precede a statement */
} /* end of infinite while loop */
base_pair2[0].i = b; /* save the total number of base pairs */
}
/* Backtrack a structure under the assumption that base pair (i,j) is
 * formed (used e.g. by zukersubopt()). Returns a freshly allocated
 * dot-bracket string; the caller owns it and must free() it.
 * NOTE(review): unlike energy_of_struct_par(), the globals S/S1 are
 * overwritten here without being saved/restored — confirm no caller
 * relies on their previous values. */
PUBLIC char *backtrack_fold_from_pair(char *sequence, int i, int j) {
char *structure;
size_t n = strlen(sequence); /* hoisted: was computed three times */
sector[1].i = i;
sector[1].j = j;
sector[1].ml = 2; /* ml==2: force (i,j) as a pair during backtracking */
base_pair2[0].i = 0;
S = encode_sequence(sequence, 0);
S1 = encode_sequence(sequence, 1);
backtrack(sequence, 1);
structure = (char *) space((n+1)*sizeof(char));
parenthesis_structure(structure, base_pair2, (int) n);
free(S); free(S1);
return structure;
}
/*---------------------------------------------------------------------------*/
/* Render the pair list bp (bp[0].i holds the pair count) as a "letter"
 * structure: each helix gets one letter (A, B, C, ...); a pair adjacent
 * to an already-labelled pair reuses that helix' letter. Unpaired
 * positions stay ' '. */
PUBLIC void letter_structure(char *structure, bondT *bp, int length){
int used, idx, pi, pj;
char alpha[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
for (idx = 0; idx < length; idx++)
structure[idx] = ' ';
structure[length] = '\0';
used = 0;
for (idx = 1; idx <= bp[0].i; idx++) {
pi = bp[idx].i;
pj = bp[idx].j;
/* does (pi,pj) extend the enclosing pair (pi-1,pj+1)? reuse its letter */
if (pi - 1 > 0 && pj + 1 <= length &&
structure[pi-2] != ' ' && structure[pj] == structure[pi-2]) {
structure[pi-1] = structure[pi-2];
structure[pj-1] = structure[pi-1];
continue;
}
/* does (pi,pj) extend the enclosed pair (pi+1,pj-1)? reuse its letter */
if (structure[pi] != ' ' && structure[pj-2] == structure[pi]) {
structure[pi-1] = structure[pi];
structure[pj-1] = structure[pi-1];
continue;
}
/* otherwise start a new helix with the next letter */
used++;
structure[pi-1] = alpha[used-1];
structure[pj-1] = alpha[used-1];
}
}
/*---------------------------------------------------------------------------*/
/* Render the pair list bp (bp[0].i holds the pair count) in dot-bracket
 * notation. Positions belonging to a G-quadruplex are stored with
 * .i == .j and are printed as '+'. */
PUBLIC void parenthesis_structure(char *structure, bondT *bp, int length){
int idx;
for (idx = 0; idx < length; idx++)
structure[idx] = '.';
structure[length] = '\0';
for (idx = 1; idx <= bp[0].i; idx++) {
int pi = bp[idx].i;
int pj = bp[idx].j;
if (pi == pj) {
structure[pi-1] = '+'; /* G-quadruplex position */
} else {
structure[pi-1] = '(';
structure[pj-1] = ')';
}
}
}
/* Dot-bracket output for zuker-style pair lists where indices may exceed
 * 'length'; such indices are wrapped back into [1..length] and swapped
 * so the opening index comes first. Gquad positions (.i == .j) print
 * as '+'. */
PUBLIC void parenthesis_zuker(char *structure, bondT *bp, int length){
int idx;
for (idx = 0; idx < length; idx++)
structure[idx] = '.';
structure[length] = '\0';
for (idx = 1; idx <= bp[0].i; idx++) {
int a = bp[idx].i;
int b = bp[idx].j;
if (a > length) a -= length;
if (b > length) b -= length;
if (a > b) { int swp = a; a = b; b = swp; }
if (a == b) {
structure[a-1] = '+'; /* G-quadruplex position */
} else {
structure[a-1] = '(';
structure[b-1] = ')';
}
}
}
/*---------------------------------------------------------------------------*/
/* Rebuild the global energy parameter set P from the current global
 * model settings (temperature etc.); wrapper around the _par variant. */
PUBLIC void update_fold_params(void){
update_fold_params_par(NULL);
}
/* Replace the global energy parameter set P: either copy the supplied
 * set, or (when parameters == NULL) derive a fresh scaled set from the
 * current global model details and temperature. Also refreshes the
 * pair matrix. */
PUBLIC void update_fold_params_par(paramT *parameters){
free(P); /* free(NULL) is a no-op */
if (parameters == NULL) {
model_detailsT md;
set_model_details(&md);
P = get_scaled_parameters(temperature, md);
} else {
P = get_parameter_copy(parameters);
}
make_pair_matrix();
if (init_length < 0)
init_length = 0;
}
/*---------------------------------------------------------------------------*/
PUBLIC float energy_of_structure(const char *string, const char *structure, int verbosity_level){
return energy_of_struct_par(string, structure, NULL, verbosity_level);
}
/* Evaluate the free energy (kcal/mol) of 'structure' on 'string' with
 * the given parameter set. Temporarily installs fresh sequence
 * encodings in the globals S/S1 and restores the previous pointers
 * before returning. */
PUBLIC float energy_of_struct_par(const char *string,
const char *structure,
paramT *parameters,
int verbosity_level){
int en;
short *saved_S, *saved_S1;
update_fold_params_par(parameters);
if (strlen(string) != strlen(structure))
nrerror("energy_of_struct: string and structure have unequal length");
/* save the S and S1 pointers in case they were already in use */
saved_S = S;
saved_S1 = S1;
S = encode_sequence(string, 0);
S1 = encode_sequence(string, 1);
pair_table = make_pair_table(structure);
en = energy_of_structure_pt(string, pair_table, S, S1, verbosity_level);
free(pair_table);
free(S);
free(S1);
S = saved_S;
S1 = saved_S1;
return (float) en/100.;
}
/* returns a correction term that may be added to the energy retrieved
from energy_of_struct_par() to correct misinterpreted loops. This
correction is necessary since energy_of_struct_par() will forget
about the existance of gquadruplexes and just treat them as unpaired
regions.
recursive variant
*/
/* Recursively scan structure[i..j] for g-quadruplexes ('+' runs) and
 * return the energy correction (dekacal/mol) that must be ADDED to the
 * result of energy_of_structure_pt(), which treated the gquad bases as
 * unpaired. pt is the pair table, loop_idx the loop index table, s1 the
 * encoded sequence. */
PRIVATE int en_corr_of_loop_gquad(int i,
int j,
const char *string,
const char *structure,
short *pt,
int *loop_idx,
const short *s1){
int pos, energy, p, q, r, s, u, type, type2;
int L, l[3];
energy = 0;
q = i;
/* scan left to right for gquad patterns in the structure string */
while((pos = parse_gquad(structure + q-1, &L, l)) > 0){
q += pos-1;
p = q - 4*L - l[0] - l[1] - l[2] + 1; /* 5' start of the gquad */
if(q > j) break;
/* we've found the first g-quadruplex at position [p,q] */
energy += E_gquad(L, l, P);
/* check if it's enclosed in a base pair */
if(loop_idx[p] == 0){ q++; continue; /* g-quad in exterior loop */}
else{
energy += E_MLstem(0, -1, -1, P); /* do not forget to remove this energy if
the gquad is the only one surrounded by
the enclosing pair
*/
/* find its enclosing pair */
int num_elem, num_g, elem_i, elem_j, up_mis;
num_elem = 0; /* number of structure elements beside the gquad(s) */
num_g = 1; /* number of gquads inside the enclosing pair */
r = p - 1;
up_mis = q - p + 1; /* bases misinterpreted as unpaired */
/* seek for first pairing base located 5' of the g-quad */
for(r = p - 1; !pt[r] && (r >= i); r--);
if(r < i) nrerror("this should not happen");
if(r < pt[r]){ /* found the enclosing pair */
s = pt[r];
} else {
num_elem++;
elem_i = pt[r];
elem_j = r;
r = pt[r]-1 ;
/* seek for next pairing base 5' of r */
for(; !pt[r] && (r >= i); r--);
if(r < i) nrerror("so nich");
if(r < pt[r]){ /* found the enclosing pair */
s = pt[r];
} else {
/* hop over stems and unpaired nucleotides */
while((r > pt[r]) && (r >= i)){
if(pt[r]){ r = pt[r]; num_elem++;}
r--;
}
if(r < i) nrerror("so nich");
s = pt[r]; /* found the enclosing pair */
}
}
/* now we have the enclosing pair (r,s) */
u = q+1;
/* we know everything about the 5' part of this loop so check the 3' part */
while(u<s){
if(structure[u-1] == '.') u++;
else if (structure[u-1] == '+'){ /* found another gquad */
pos = parse_gquad(structure + u - 1, &L, l);
if(pos > 0){
energy += E_gquad(L, l, P) + E_MLstem(0, -1, -1, P);
up_mis += pos;
u += pos;
num_g++;
}
} else { /* we must have found a stem */
if(!(u < pt[u])) nrerror("wtf!");
num_elem++; elem_i = u; elem_j = pt[u];
/* recurse into the enclosed stem to correct its loops too */
energy += en_corr_of_loop_gquad(u, pt[u], string, structure, pt, loop_idx, s1);
u = pt[u] + 1;
}
}
if(u!=s) nrerror("what the hell");
else{ /* we are done since we've found no other 3' structure element */
switch(num_elem){
/* g-quad was misinterpreted as hairpin closed by (r,s) */
case 0: /* if(num_g == 1)
if((p-r-1 == 0) || (s-q-1 == 0))
nrerror("too few unpaired bases");
*/
type = pair[s1[r]][s1[s]];
if(dangles == 2)
energy += P->mismatchI[type][s1[r+1]][s1[s-1]];
if(type > 2)
energy += P->TerminalAU;
energy += P->internal_loop[s - r - 1 - up_mis];
energy -= E_MLstem(0, -1, -1, P);
energy -= E_Hairpin(s - r - 1,
type,
s1[r + 1],
s1[s - 1],
string + r - 1,
P);
break;
/* g-quad was misinterpreted as interior loop closed by (r,s) with enclosed pair (elem_i, elem_j) */
case 1: type = pair[s1[r]][s1[s]];
type2 = pair[s1[elem_i]][s1[elem_j]];
energy += P->MLclosing
+ E_MLstem(rtype[type], s1[s-1], s1[r+1], P)
+ (elem_i - r - 1 + s - elem_j - 1 - up_mis) * P->MLbase
+ E_MLstem(type2, s1[elem_i-1], s1[elem_j+1], P);
energy -= E_IntLoop(elem_i - r - 1,
s - elem_j - 1,
type,
rtype[type2],
s1[r + 1],
s1[s - 1],
s1[elem_i - 1],
s1[elem_j + 1],
P);
break;
/* gquad was misinterpreted as unpaired nucleotides in a multiloop */
default: energy -= (up_mis) * P->MLbase;
break;
}
}
q = s+1; /* continue the scan 3' of the enclosing pair */
}
}
return energy;
}
PUBLIC float
energy_of_gquad_structure(const char *string,
const char *structure,
int verbosity_level){
return energy_of_gquad_struct_par(string, structure, NULL, verbosity_level);
}
/* Evaluate the free energy (kcal/mol) of a structure that may contain
 * g-quadruplexes ('+' runs). Runs the ordinary evaluation first, then
 * adds the correction term from en_corr_of_loop_gquad(). Temporarily
 * replaces the global S/S1 encodings and restores them afterwards. */
PUBLIC float
energy_of_gquad_struct_par( const char *string,
const char *structure,
paramT *parameters,
int verbosity_level){
int energy, gge, *loop_idx;
short *ss, *ss1;
update_fold_params_par(parameters);
if (strlen(structure)!=strlen(string))
nrerror("energy_of_struct: string and structure have unequal length");
/* save the S and S1 pointers in case they were already in use */
ss = S; ss1 = S1;
S = encode_sequence(string, 0);
S1 = encode_sequence(string, 1);
/* the pair_table looses every information about the gquad position
thus we have to find add the energy contributions for each loop
that contains a gquad by ourself, substract all miscalculated
contributions, i.e. loops that actually contain a gquad, from
energy_of_structure_pt()
*/
pair_table = make_pair_table(structure);
energy = energy_of_structure_pt(string, pair_table, S, S1, verbosity_level);
loop_idx = make_loop_index_pt(pair_table);
/* S[0] holds the sequence length */
gge = en_corr_of_loop_gquad(1, S[0], string, structure, pair_table, loop_idx, S1);
energy += gge;
free(pair_table);
free(loop_idx);
free(S); free(S1);
S=ss; S1=ss1;
return (float) energy/100.;
}
PUBLIC int energy_of_structure_pt(const char *string,
short *ptable,
short *s,
short *s1,
int verbosity_level){
return energy_of_struct_pt_par(string, ptable, s, s1, NULL, verbosity_level);
}
/* Evaluate the energy (dekacal/mol) of the structure given as a pair
 * table, using caller-supplied encodings s/s1. Temporarily installs
 * them into the globals S/S1 (restored before returning). Adds a
 * DuplexInit penalty once if the structure pairs across a strand cut. */
PUBLIC int energy_of_struct_pt_par( const char *string,
short *ptable,
short *s,
short *s1,
paramT *parameters,
int verbosity_level){
/* auxiliary function for kinfold,
for most purposes call energy_of_struct instead */
int i, length, energy;
short *ss, *ss1;
update_fold_params_par(parameters);
pair_table = ptable;
ss = S;
ss1 = S1;
S = s;
S1 = s1;
length = S[0]; /* S[0] holds the sequence length */
/* energy = backtrack_type=='M' ? ML_Energy(0, 0) : ML_Energy(0, 1); */
energy = backtrack_type=='M' ? energy_of_ml_pt(0, ptable) : energy_of_extLoop_pt(0, ptable);
if (verbosity_level>0)
printf("External loop : %5d\n", energy);
/* add the energy of every helix/loop, skipping over paired regions */
for (i=1; i<=length; i++) {
if (pair_table[i]==0) continue;
energy += stack_energy(i, string, verbosity_level);
i=pair_table[i];
}
/* duplex initiation penalty if any pair crosses the strand boundary */
for (i=1; !SAME_STRAND(i,length); i++) {
if (!SAME_STRAND(i,pair_table[i])) {
energy+=P->DuplexInit;
break;
}
}
S = ss;
S1 = ss1;
return energy;
}
PUBLIC float energy_of_circ_structure(const char *string,
const char *structure,
int verbosity_level){
return energy_of_circ_struct_par(string, structure, NULL, verbosity_level);
}
/* Evaluate the free energy (kcal/mol) of a circular RNA secondary structure.
   string    : RNA sequence
   structure : dot-bracket structure of the same length
   parameters: energy parameter set (NULL = current global parameters)
   Prints the exterior-loop term if verbosity_level > 0.
   Scores all stems via stack_energy(), then adds the energy of the
   "exterior" loop of the circle, which is a hairpin, interior loop or
   multi-loop depending on how many stems (degree) branch off it. */
PUBLIC float energy_of_circ_struct_par( const char *string,
const char *structure,
paramT *parameters,
int verbosity_level){
int i, j, length, energy=0, en0, degree=0, type;
short *ss, *ss1;
update_fold_params_par(parameters);
int dangle_model = P->model_details.dangles;
if (strlen(structure)!=strlen(string))
nrerror("energy_of_struct: string and structure have unequal length");
/* save the S and S1 pointers in case they were already in use */
ss = S; ss1 = S1;
S = encode_sequence(string, 0);
S1 = encode_sequence(string, 1);
pair_table = make_pair_table(structure);
length = S[0];
/* score every stem branching off the virtual exterior circle */
for (i=1; i<=length; i++) {
if (pair_table[i]==0) continue;
degree++;
energy += stack_energy(i, string, verbosity_level);
i=pair_table[i];
}
if (degree==0) {
/* open chain: nothing to score. BUGFIX: the original returned here
   without freeing S, S1 and pair_table and without restoring the saved
   global pointers, leaking memory on every call. */
free(pair_table);
free(S); free(S1);
S=ss; S1=ss1;
return 0.;
}
/* locate the first pair (i,j); seen from the circle's exterior loop the
   closing pair is reversed, hence the pair[S[j]][S[i]] lookup */
for (i=1; pair_table[i]==0; i++);
j = pair_table[i];
type=pair[S[j]][S[i]];
if (type==0) type=7;
if (degree==1) {
/* a single stem: the exterior loop is a hairpin closed by (j,i) */
char loopseq[10];
int u, si1, sj1;
for (i=1; pair_table[i]==0; i++);
u = length-j + i-1;
if (u<7) {
/* E_Hairpin() inspects the loop sequence only for u<7 (tri-, tetra-
   and hexaloop bonuses); build it by wrapping around the origin */
strcpy(loopseq , string+j-1);
strncat(loopseq, string, i);
}
si1 = (i==1)?S1[length] : S1[i-1];
sj1 = (j==length)?S1[1] : S1[j+1];
en0 = E_Hairpin(u, type, sj1, si1, loopseq, P);
} else
if (degree==2) {
/* two stems: the exterior loop is an interior loop */
int p,q, u1,u2, si1, sq1, type_2;
for (p=j+1; pair_table[p]==0; p++);
q=pair_table[p];
u1 = p-j-1;
u2 = i-1 + length-q;
type_2 = pair[S[q]][S[p]];
if (type_2==0) type_2=7;
si1 = (i==1)? S1[length] : S1[i-1];
sq1 = (q==length)? S1[1] : S1[q+1];
en0 = E_IntLoop(u1, u2, type, type_2,
S1[j+1], si1, S1[p-1], sq1,P);
} else { /* degree > 2 */
/* multi-loop; ML_Energy counts one MLintern too many for the circle */
en0 = ML_Energy(0, 0) - P->MLintern[0];
if (dangle_model) {
int d5, d3;
if (pair_table[1]) {
j = pair_table[1];
type = pair[S[1]][S[j]];
if (dangle_model==2)
en0 += P->dangle5[type][S1[length]];
else { /* dangle_model==1 */
if (pair_table[length]==0) {
d5 = P->dangle5[type][S1[length]];
if (pair_table[length-1]!=0) {
int tt;
tt = pair[S[pair_table[length-1]]][S[length-1]];
d3 = P->dangle3[tt][S1[length]];
if (d3<d5) d5 = 0;
else d5 -= d3;
}
en0 += d5;
}
}
}
if (pair_table[length]) {
i = pair_table[length];
type = pair[S[i]][S[length]];
if (dangle_model==2)
en0 += P->dangle3[type][S1[1]];
else { /* dangle_model==1 */
if (pair_table[1]==0) {
d3 = P->dangle3[type][S1[1]];
if (pair_table[2]) {
int tt;
tt = pair[S[2]][S[pair_table[2]]];
/* BUGFIX: index the dangle table with the encoded base S1[1],
   not the constant 1 — mirrors the d3 = dangle3[tt][S1[length]]
   lookup in the symmetric branch above */
d5 = P->dangle5[tt][S1[1]];
if (d5<d3) d3=0;
else d3 -= d5;
}
en0 += d3;
}
}
}
}
}
if (verbosity_level>0)
printf("External loop : %5d\n", en0);
energy += en0;
/* fprintf(stderr, "ext loop degree %d tot %d\n", degree, energy); */
/* BUGFIX: also release the pair table (it was leaked before) */
free(pair_table);
free(S); free(S1);
S=ss; S1=ss1;
return (float) energy/100.0;
}
/*---------------------------------------------------------------------------*/
/* Score the substructure enclosed by the pair (i, pair_table[i]).
   Descends through all directly stacked pairs / interior loops, then either
   terminates in a hairpin or recurses into the branches of a multi-loop.
   Loops spanning the cofold cut point are scored with exterior-loop rules.
   Returns the energy in dekacal/mol; with verbosity_level>0 each loop's
   contribution is printed, with verbosity_level>=0 impossible pairs are
   reported on stderr (and scored as the non-standard type 7). */
PRIVATE int stack_energy(int i, const char *string, int verbosity_level)
{
/* calculate energy of substructure enclosed by (i,j) */
int ee, energy = 0;
int j, p, q, type;
j=pair_table[i];
type = pair[S[i]][S[j]];
if (type==0) {
type=7;
if (verbosity_level>=0)
fprintf(stderr,"WARNING: bases %d and %d (%c%c) can't pair!\n", i, j,
string[i-1],string[j-1]);
}
p=i; q=j;
while (p<q) { /* process all stacks and interior loops */
int type_2;
/* find the next enclosed pair from both sides */
while (pair_table[++p]==0);
while (pair_table[--q]==0);
if ((pair_table[q]!=(short)p)||(p>q)) break;
type_2 = pair[S[q]][S[p]];
if (type_2==0) {
type_2=7;
if (verbosity_level>=0)
fprintf(stderr,"WARNING: bases %d and %d (%c%c) can't pair!\n", p, q,
string[p-1],string[q-1]);
}
/* energy += LoopEnergy(i, j, p, q, type, type_2); */
if ( SAME_STRAND(i,p) && SAME_STRAND(q,j) )
ee = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1],P);
else
/* the loop contains the cut point: score it as an exterior loop */
ee = energy_of_extLoop_pt(cut_in_loop(i), pair_table);
if (verbosity_level>0)
printf("Interior loop (%3d,%3d) %c%c; (%3d,%3d) %c%c: %5d\n",
i,j,string[i-1],string[j-1],p,q,string[p-1],string[q-1], ee);
energy += ee;
i=p; j=q; type = rtype[type_2];
} /* end while */
/* p,q don't pair must have found hairpin or multiloop */
if (p>q) { /* hair pin */
if (SAME_STRAND(i,j))
ee = E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], string+i-1, P);
else
ee = energy_of_extLoop_pt(cut_in_loop(i), pair_table);
energy += ee;
if (verbosity_level>0)
printf("Hairpin loop (%3d,%3d) %c%c : %5d\n",
i, j, string[i-1],string[j-1], ee);
return energy;
}
/* (i,j) is exterior pair of multiloop */
while (p<j) {
/* add up the contributions of the substructures of the ML */
energy += stack_energy(p, string, verbosity_level);
p = pair_table[p];
/* search for next base pair in multiloop */
while (pair_table[++p]==0);
}
{
int ii;
ii = cut_in_loop(i);
/* the multi-loop itself: exterior-loop rules if it spans the cut */
ee = (ii==0) ? energy_of_ml_pt(i, pair_table) : energy_of_extLoop_pt(ii, pair_table);
}
energy += ee;
if (verbosity_level>0)
printf("Multi loop (%3d,%3d) %c%c : %5d\n",
i,j,string[i-1],string[j-1],ee);
return energy;
}
/*---------------------------------------------------------------------------*/
/**
*** Calculate the energy contribution of
*** stabilizing dangling-ends/mismatches
*** for all stems branching off the exterior
*** loop
**/
/* Sum the stem contributions (with dangles/mismatches per the model) of the
   exterior loop. i==0 starts at the 5' end; a nonzero i (as produced by
   cut_in_loop()) starts the walk there and stops when it is reached again.
   NOTE(review): the parameter shadows the file-scope pair_table global. */
PRIVATE int energy_of_extLoop_pt(int i, short *pair_table) {
int energy, mm5, mm3;
int p, q, q_prev;
int length = (int)pair_table[0];
/* helper variables for dangles == 1 case */
int E3_available; /* energy of 5' part where 5' mismatch is available for current stem */
int E3_occupied; /* energy of 5' part where 5' mismatch is unavailable for current stem */
int dangle_model = P->model_details.dangles;
/* initialize vars */
energy = 0;
p = (i==0) ? 1 : i;
q_prev = -1;
if(dangle_model%2 == 1){
E3_available = INF;
E3_occupied = 0;
}
/* seek to opening base of first stem */
while(p <= length && !pair_table[p]) p++;
while(p < length){
int tt;
/* p must have a pairing partner */
q = (int)pair_table[p];
/* get type of base pair (p,q) */
tt = pair[S[p]][S[q]];
if(tt==0) tt=7;
switch(dangle_model){
/* no dangles */
case 0: energy += E_ExtLoop(tt, -1, -1, P);
break;
/* the beloved double dangles */
case 2: mm5 = ((SAME_STRAND(p-1,p)) && (p>1)) ? S1[p-1] : -1;
mm3 = ((SAME_STRAND(q,q+1)) && (q<length)) ? S1[q+1] : -1;
energy += E_ExtLoop(tt, mm5, mm3, P);
break;
/* odd dangle models (dangles==1): each unpaired base may serve at
   most one adjacent stem, tracked by a 2-state minimization over
   "5' mismatch base already consumed" vs. "still free" */
default: {
int tmp;
if(q_prev + 2 < p){
/* at least one free base between stems: the states converge */
E3_available = MIN2(E3_available, E3_occupied);
E3_occupied = E3_available;
}
mm5 = ((SAME_STRAND(p-1,p)) && (p>1) && !pair_table[p-1]) ? S1[p-1] : -1;
mm3 = ((SAME_STRAND(q,q+1)) && (q<length) && !pair_table[q+1]) ? S1[q+1] : -1;
tmp = MIN2(
E3_occupied + E_ExtLoop(tt, -1, mm3, P),
E3_available + E_ExtLoop(tt, mm5, mm3, P)
);
E3_available = MIN2(
E3_occupied + E_ExtLoop(tt, -1, -1, P),
E3_available + E_ExtLoop(tt, mm5, -1, P)
);
E3_occupied = tmp;
}
break;
} /* end switch dangle_model */
/* seek to the next stem */
p = q + 1;
q_prev = q;
while (p <= length && !pair_table[p]) p++;
if(p==i) break; /* cut was in loop */
}
if(dangle_model%2 == 1)
energy = MIN2(E3_occupied, E3_available);
return energy;
}
/**
*** i is the 5'-base of the closing pair
***
*** since each helix can coaxially stack with at most one of its
*** neighbors we need an auxiliarry variable cx_energy
*** which contains the best energy given that the last two pairs stack.
*** energy holds the best energy given the previous two pairs do not
*** stack (i.e. the two current helices may stack)
*** We don't allow the last helix to stack with the first, thus we have to
*** walk around the Loop twice with two starting points and take the minimum
***/
/* Score the multi-loop closed by pair (i, pt[i]); i is the 5' base of the
   closing pair (i==0 is only legal for the dangles==3 walk, see case 3).
   Dispatches on the dangle model:
     0 : no dangling ends,
     2 : mismatch bases on both sides of every stem,
     3 : coaxial helix stacking — walks the loop twice from two starting
         points and takes the minimum (see the comment block above),
     default (1): 2-state DP so each unpaired base serves at most one stem.
   Adds MLclosing plus the (optionally logarithmic) unpaired-base penalty. */
PRIVATE int energy_of_ml_pt(int i, short *pt){
int energy, cx_energy, tmp, tmp2, best_energy=INF;
int i1, j, p, q, q_prev, q_prev2, u, x, type, count, mm5, mm3, tt, ld5, new_cx, dang5, dang3, dang;
int mlintern[NBPAIRS+1];
/* helper variables for dangles == 1|5 case */
int E_mm5_available; /* energy of 5' part where 5' mismatch of current stem is available */
int E_mm5_occupied; /* energy of 5' part where 5' mismatch of current stem is unavailable */
int E2_mm5_available; /* energy of 5' part where 5' mismatch of current stem is available with possible 3' dangle for enclosing pair (i,j) */
int E2_mm5_occupied; /* energy of 5' part where 5' mismatch of current stem is unavailable with possible 3' dangle for enclosing pair (i,j) */
int dangle_model = P->model_details.dangles;
if(i >= pt[i])
nrerror("energy_of_ml_pt: i is not 5' base of a closing pair!");
j = (int)pt[i];
/* init the variables */
energy = 0;
p = i+1;
q_prev = i-1;
q_prev2 = i;
for (x = 0; x <= NBPAIRS; x++) mlintern[x] = P->MLintern[x];
/* seek to opening base of first stem */
while(p <= j && !pair_table[p]) p++;
u = p - i - 1;
switch(dangle_model){
case 0: while(p < j){
/* p must have a pairing partner */
q = (int)pair_table[p];
/* get type of base pair (p,q) */
tt = pair[S[p]][S[q]];
if(tt==0) tt=7;
energy += E_MLstem(tt, -1, -1, P);
/* seek to the next stem */
p = q + 1;
q_prev = q_prev2 = q;
while (p <= j && !pair_table[p]) p++;
u += p - q - 1; /* add unpaired nucleotides */
}
/* now lets get the energy of the enclosing stem */
type = pair[S[j]][S[i]]; if (type==0) type=7;
energy += E_MLstem(type, -1, -1, P);
break;
case 2: while(p < j){
/* p must have a pairing partner */
q = (int)pair_table[p];
/* get type of base pair (p,q) */
tt = pair[S[p]][S[q]];
if(tt==0) tt=7;
mm5 = (SAME_STRAND(p-1,p)) ? S1[p-1] : -1;
mm3 = (SAME_STRAND(q,q+1)) ? S1[q+1] : -1;
energy += E_MLstem(tt, mm5, mm3, P);
/* seek to the next stem */
p = q + 1;
q_prev = q_prev2 = q;
while (p <= j && !pair_table[p]) p++;
u += p - q - 1; /* add unpaired nucleotides */
}
type = pair[S[j]][S[i]]; if (type==0) type=7;
mm5 = ((SAME_STRAND(j-1,j)) && !pair_table[j-1]) ? S1[j-1] : -1;
mm3 = ((SAME_STRAND(i,i+1)) && !pair_table[i+1]) ? S1[i+1] : -1;
/* NOTE(review): mm5/mm3 above are computed but unused; the closing stem
   always receives both mismatch bases under dangles==2 — confirm intended */
energy += E_MLstem(type, S1[j-1], S1[i+1], P);
break;
case 3: /* we treat helix stacking different */
for (count=0; count<2; count++) { /* do it twice */
ld5 = 0; /* 5' dangle energy on prev pair (type) */
if ( i==0 ) {
j = (unsigned int)pair_table[0]+1;
type = 0; /* no pair */
}
else {
j = (unsigned int)pair_table[i];
type = pair[S[j]][S[i]]; if (type==0) type=7;
/* prime the ld5 variable */
if (SAME_STRAND(j-1,j)) {
ld5 = P->dangle5[type][S1[j-1]];
if ((p=(unsigned int)pair_table[j-2]) && SAME_STRAND(j-2, j-1))
if (P->dangle3[pair[S[p]][S[j-2]]][S1[j-1]]<ld5) ld5 = 0;
}
}
i1=i; p = i+1; u=0;
energy = 0; cx_energy=INF;
do { /* walk around the multi-loop */
new_cx = INF;
/* hop over unpaired positions */
while (p <= (unsigned int)pair_table[0] && pair_table[p]==0) p++;
/* memorize number of unpaired positions */
u += p-i1-1;
/* get position of pairing partner */
if ( p == (unsigned int)pair_table[0]+1 ){
q = 0;tt = 0; /* virtual root pair */
} else {
q = (unsigned int)pair_table[p];
/* get type of base pair P->q */
tt = pair[S[p]][S[q]]; if (tt==0) tt=7;
}
energy += mlintern[tt];
cx_energy += mlintern[tt];
dang5=dang3=0;
if ((SAME_STRAND(p-1,p))&&(p>1))
dang5=P->dangle5[tt][S1[p-1]]; /* 5'dangle of pq pair */
if ((SAME_STRAND(i1,i1+1))&&(i1<(unsigned int)S[0]))
dang3 = P->dangle3[type][S1[i1+1]]; /* 3'dangle of previous pair */
switch (p-i1-1) {
case 0: /* adjacent helices */
if (i1!=0){
if (SAME_STRAND(i1,p)) {
new_cx = energy + P->stack[rtype[type]][rtype[tt]];
/* subtract 5'dangle and TerminalAU penalty */
new_cx += -ld5 - mlintern[tt]-mlintern[type]+2*mlintern[1];
}
ld5=0;
energy = MIN2(energy, cx_energy);
}
break;
case 1: /* 1 unpaired base between helices */
dang = MIN2(dang3, dang5);
energy = energy +dang; ld5 = dang - dang3;
/* may be problem here: Suppose
cx_energy>energy, cx_energy+dang5<energy
and the following helices are also stacked (i.e.
we'll subtract the dang5 again */
if (cx_energy+dang5 < energy) {
energy = cx_energy+dang5;
ld5 = dang5;
}
new_cx = INF; /* no coax stacking with mismatch for now */
break;
default: /* many unpaired base between helices */
energy += dang5 +dang3;
energy = MIN2(energy, cx_energy + dang5);
new_cx = INF; /* no coax stacking possible */
ld5 = dang5;
break;
}
type = tt;
cx_energy = new_cx;
i1 = q; p=q+1;
} while (q!=i);
best_energy = MIN2(energy, best_energy); /* don't use cx_energy here */
/* fprintf(stderr, "%6.2d\t", energy); */
/* skip a helix and start again */
while (pair_table[p]==0) p++;
if (i == (unsigned int)pair_table[p]) break;
i = (unsigned int)pair_table[p];
} /* end doing it twice */
energy = best_energy;
break;
/* dangles==1: two parallel 2-state DPs — the E_ variables ignore the
   closing pair's 3' dangle, the E2_ variables reserve base i+1 for it */
default: E_mm5_available = E2_mm5_available = INF;
E_mm5_occupied = E2_mm5_occupied = 0;
while(p < j){
/* p must have a pairing partner */
q = (int)pair_table[p];
/* get type of base pair (p,q) */
tt = pair[S[p]][S[q]];
if(tt==0) tt=7;
if(q_prev + 2 < p){
E_mm5_available = MIN2(E_mm5_available, E_mm5_occupied);
E_mm5_occupied = E_mm5_available;
}
if(q_prev2 + 2 < p){
E2_mm5_available = MIN2(E2_mm5_available, E2_mm5_occupied);
E2_mm5_occupied = E2_mm5_available;
}
mm5 = ((SAME_STRAND(p-1,p)) && !pair_table[p-1]) ? S1[p-1] : -1;
mm3 = ((SAME_STRAND(q,q+1)) && !pair_table[q+1]) ? S1[q+1] : -1;
tmp = MIN2(
E_mm5_occupied + E_MLstem(tt, -1, mm3, P),
E_mm5_available + E_MLstem(tt, mm5, mm3, P)
);
tmp = MIN2(tmp, E_mm5_available + E_MLstem(tt, -1, mm3, P));
tmp2 = MIN2(
E_mm5_occupied + E_MLstem(tt, -1, -1, P),
E_mm5_available + E_MLstem(tt, mm5, -1, P)
);
E_mm5_available = MIN2(tmp2, E_mm5_available + E_MLstem(tt, -1, -1, P));
E_mm5_occupied = tmp;
tmp = MIN2(
E2_mm5_occupied + E_MLstem(tt, -1, mm3, P),
E2_mm5_available + E_MLstem(tt, mm5, mm3, P)
);
tmp = MIN2(tmp, E2_mm5_available + E_MLstem(tt, -1, mm3, P));
tmp2 = MIN2(
E2_mm5_occupied + E_MLstem(tt, -1, -1, P),
E2_mm5_available + E_MLstem(tt, mm5, -1, P)
);
E2_mm5_available = MIN2(tmp2, E2_mm5_available + E_MLstem(tt, -1, -1, P));
E2_mm5_occupied = tmp;
/* printf("(%d,%d): \n E_o = %d, E_a = %d, E2_o = %d, E2_a = %d\n", p, q, E_mm5_occupied,E_mm5_available,E2_mm5_occupied,E2_mm5_available); */
/* seek to the next stem */
p = q + 1;
q_prev = q_prev2 = q;
while (p <= j && !pair_table[p]) p++;
u += p - q - 1; /* add unpaired nucleotides */
}
/* now lets see how we get the minimum including the enclosing stem */
type = pair[S[j]][S[i]]; if (type==0) type=7;
mm5 = ((SAME_STRAND(j-1,j)) && !pair_table[j-1]) ? S1[j-1] : -1;
mm3 = ((SAME_STRAND(i,i+1)) && !pair_table[i+1]) ? S1[i+1] : -1;
if(q_prev + 2 < p){
E_mm5_available = MIN2(E_mm5_available, E_mm5_occupied);
E_mm5_occupied = E_mm5_available;
}
if(q_prev2 + 2 < p){
E2_mm5_available = MIN2(E2_mm5_available, E2_mm5_occupied);
E2_mm5_occupied = E2_mm5_available;
}
energy = MIN2(E_mm5_occupied + E_MLstem(type, -1, -1, P),
E_mm5_available + E_MLstem(type, mm5, -1, P)
);
energy = MIN2(energy, E_mm5_available + E_MLstem(type, -1, -1, P));
energy = MIN2(energy, E2_mm5_occupied + E_MLstem(type, -1, mm3, P));
energy = MIN2(energy, E2_mm5_occupied + E_MLstem(type, -1, -1, P));
energy = MIN2(energy, E2_mm5_available + E_MLstem(type, mm5, mm3, P));
energy = MIN2(energy, E2_mm5_available + E_MLstem(type, -1, mm3, P));
energy = MIN2(energy, E2_mm5_available + E_MLstem(type, mm5, -1, P));
energy = MIN2(energy, E2_mm5_available + E_MLstem(type, -1, -1, P));
break;
}/* end switch dangle_model */
energy += P->MLclosing;
/* logarithmic ML loop energy if logML */
if(logML && (u>6))
energy += 6*P->MLbase+(int)(P->lxc*log((double)u/6.));
else
energy += (u*P->MLbase);
return energy;
}
/*---------------------------------------------------------------------------*/
/* Compute the energy (dekacal/mol) of the single loop closed by base pair
   (i, ptable[i]); i==0 evaluates the exterior loop instead.
   ptable : pair table of the full structure
   s, s1  : sequence encodings as produced by encode_sequence(seq, 0|1)
   The file-scope globals pair_table/S/S1 are swapped in for the duration of
   the call and restored on every return path. */
PUBLIC int loop_energy(short * ptable, short *s, short *s1, int i) {
/* compute energy of a single loop closed by base pair (i,j) */
int j, type, p,q, energy;
short *Sold, *S1old, *ptold;
ptold=pair_table; Sold = S; S1old = S1;
pair_table = ptable; S = s; S1 = s1;
if (i==0) { /* evaluate exterior loop */
energy = energy_of_extLoop_pt(0,pair_table);
pair_table=ptold; S=Sold; S1=S1old;
return energy;
}
j = pair_table[i];
if (j<i) nrerror("i is unpaired in loop_energy()");
type = pair[S[i]][S[j]];
if (type==0) {
type=7;
if (eos_debug>=0)
fprintf(stderr,"WARNING: bases %d and %d (%c%c) can't pair!\n", i, j,
Law_and_Order[S[i]],Law_and_Order[S[j]]);
}
p=i; q=j;
while (pair_table[++p]==0);
while (pair_table[--q]==0);
if (p>q) { /* Hairpin */
/* BUGFIX: for a hairpin of size 6 (j-i-1==6, admitted by the <7 test
   below) the copied sequence i..j is 8 characters plus the terminating
   NUL; the former char[8] buffer overflowed by one byte when writing
   loopseq[8] = '\0'. Nine bytes are required. */
char loopseq[9] = "";
if (SAME_STRAND(i,j)) {
if (j-i-1<7) {
/* E_Hairpin() only inspects the sequence for small loops */
int u;
for (u=0; i+u<=j; u++) loopseq[u] = Law_and_Order[S[i+u]];
loopseq[u] = '\0';
}
energy = E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], loopseq, P);
} else {
/* hairpin spans the cofold cut point: score as exterior loop */
energy = energy_of_extLoop_pt(cut_in_loop(i), pair_table);
}
}
else if (pair_table[q]!=(short)p) { /* multi-loop */
int ii;
ii = cut_in_loop(i);
energy = (ii==0) ? energy_of_ml_pt(i, pair_table) : energy_of_extLoop_pt(ii, pair_table);
}
else { /* found interior loop */
int type_2;
type_2 = pair[S[q]][S[p]];
if (type_2==0) {
type_2=7;
if (eos_debug>=0)
fprintf(stderr,"WARNING: bases %d and %d (%c%c) can't pair!\n", p, q,
Law_and_Order[S[p]],Law_and_Order[S[q]]);
}
/* energy += LoopEnergy(i, j, p, q, type, type_2); */
if ( SAME_STRAND(i,p) && SAME_STRAND(q,j) )
energy = E_IntLoop(p-i-1, j-q-1, type, type_2,
S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
else
energy = energy_of_extLoop_pt(cut_in_loop(i), pair_table);
}
pair_table=ptold; S=Sold; S1=S1old;
return energy;
}
/*---------------------------------------------------------------------------*/
/* Compute the energy change (kcal/mol) caused by a single move on structure:
   m1,m2 > 0 inserts pair (m1,m2); m1,m2 < 0 deletes pair (-m1,-m2).
   Encodes the sequence, builds a pair table and delegates to
   energy_of_move_pt(); frees the temporaries and restores the saved global
   S/S1 pointers before returning. */
PUBLIC float energy_of_move(const char *string, const char *structure, int m1, int m2) {
int energy;
short *ss, *ss1;
#ifdef _OPENMP
/* make sure the energy parameter set exists for this thread */
if(P == NULL) update_fold_params();
#else
if((init_length<0)||(P==NULL)) update_fold_params();
#endif
/* re-scale parameters if the global temperature changed since last use */
if (fabs(P->temperature - temperature)>1e-6) update_fold_params();
if (strlen(structure)!=strlen(string))
nrerror("energy_of_struct: string and structure have unequal length");
/* save the S and S1 pointers in case they were already in use */
ss = S; ss1 = S1;
S = encode_sequence(string, 0);
S1 = encode_sequence(string, 1);
pair_table = make_pair_table(structure);
energy = energy_of_move_pt(pair_table, S, S1, m1, m2);
free(pair_table);
free(S); free(S1);
S=ss; S1=ss1;
return (float) energy/100.;
}
/*---------------------------------------------------------------------------*/
/* Compute the energy change (dekacal/mol) for move (m1,m2) on pair table pt:
   positive arguments insert pair (m1,m2), negative ones delete (-m1,-m2).
   Only the two affected loops (the enclosing loop and the changed pair's
   own loop) are evaluated before and after the move; pt is modified
   temporarily and restored before returning. */
PUBLIC int energy_of_move_pt(short *pt, short *s, short *s1, int m1, int m2) {
/*compute change in energy given by move (m1,m2)*/
int en_post, en_pre, i,j,k,l, len;
len = pt[0];
k = (m1>0)?m1:-m1;
l = (m2>0)?m2:-m2;
/* first find the enclosing pair i<k<l<j */
for (j=l+1; j<=len; j++) {
if (pt[j]<=0) continue; /* unpaired */
if (pt[j]<k) break; /* found it */
if (pt[j]>j) j=pt[j]; /* skip substructure */
else {
fprintf(stderr, "%d %d %d %d ", m1, m2, j, pt[j]);
nrerror("illegal move or broken pair table in energy_of_move()");
}
}
/* i==0 means the move happens in the exterior loop */
i = (j<=len) ? pt[j] : 0;
en_pre = loop_energy(pt, s, s1, i);
en_post = 0;
if (m1<0) { /*it's a delete move */
en_pre += loop_energy(pt, s, s1, k);
pt[k]=0;
pt[l]=0;
} else { /* insert move */
pt[k]=l;
pt[l]=k;
en_post += loop_energy(pt, s, s1, k);
}
en_post += loop_energy(pt, s, s1, i);
/* restore pair table */
if (m1<0) {
pt[k]=l;
pt[l]=k;
} else {
pt[k]=0;
pt[l]=0;
}
/* Cofolding -- Check if move changes COFOLD-Penalty */
if (!SAME_STRAND(k,l)) {
int p, c; p=c=0;
for (p=1; p < cut_point; ) { /* Count basepairs between two strands */
if (pt[p] != 0) {
if (SAME_STRAND(p,pt[p])) /* Skip stuff */
p=pt[p];
else if (++c > 1) break; /* Count a basepair, break if we have more than one */
}
p++;
}
if (m1<0 && c==1) /* First and only inserted basepair */
return (en_post - en_pre - P->DuplexInit);
else
if (c==0) /* Must have been a delete move */
return (en_post - en_pre + P->DuplexInit);
}
return (en_post - en_pre);
}
/* Walk around the loop closed by pair (i, pair_table[i]); if the cofold cut
   point lies inside this loop return j (= pair_table[i], used to restart
   exterior-loop evaluation), otherwise return 0. */
PRIVATE int cut_in_loop(int i) {
/* walk around the loop; return j pos of pair after cut if
cut_point in loop else 0 */
int p, j;
p = j = pair_table[i];
do {
/* hop to the next stem along the loop */
i = pair_table[p]; p = i+1;
while ( pair_table[p]==0 ) p++;
} while (p!=j && SAME_STRAND(i,p));
return SAME_STRAND(i,p) ? 0 : j;
}
/*---------------------------------------------------------------------------*/
/* Fill the global ptype[] matrix with the pair type of every (i,j) allowed
   under the current model. Sweeps outward along anti-diagonals so that,
   with noLonelyPairs set, a pair is zeroed unless it can stack on an
   enclosed or enclosing neighbor (isolated pairs removed). Afterwards
   applies structure constraints and, if canonicalBPonly is set, strips
   non-canonical (type 7) pairs from the constraint with a warning. */
PRIVATE void make_ptypes(const short *S, const char *structure, paramT *P) {
int n,i,j,k,l;
n=S[0];
for (k=1; k<n-TURN; k++)
for (l=1; l<=2; l++) {
int type,ntype=0,otype=0;
i=k; j = i+TURN+l; if (j>n) continue;
type = pair[S[i]][S[j]];
while ((i>=1)&&(j<=n)) {
/* ntype: type of the enclosing pair (i-1,j+1); otype: the enclosed one */
if ((i>1)&&(j<n)) ntype = pair[S[i-1]][S[j+1]];
if (noLonelyPairs && (!otype) && (!ntype))
type = 0; /* i.j can only form isolated pairs */
ptype[indx[j]+i] = (char) type;
otype = type;
type = ntype;
i--; j++;
}
}
if (struct_constrained && (structure != NULL)){
constrain_ptypes(structure, (unsigned int)n, ptype, BP, TURN, 0);
if(P->model_details.canonicalBPonly)
for(i=1;i<n;i++)
for(j=i+1;j<=n;j++)
if(ptype[indx[j]+i] == 7){
warn_user("removing non-canonical base pair from constraint");
ptype[indx[j]+i] = 0;
}
}
}
/* Convert a dot-bracket string into a freshly allocated, zero-terminated
   plist (*pl): one entry with probability pr per base pair, followed by the
   entries of any G-quadruplexes encoded in the string. The list is shrunk
   to its final size; the caller owns and must free *pl. */
PUBLIC void assign_plist_from_db(plist **pl, const char *struc, float pr){
/* convert bracket string to plist */
short *pt;
int i, k = 0, size, n;
plist *gpl, *ptr;
size = strlen(struc);
n = 2; /* initial capacity factor (n*size entries); doubled on demand */
pt = make_pair_table(struc);
*pl = (plist *)space(n*size*sizeof(plist));
for(i = 1; i < size; i++){
if(pt[i]>i){ /* record each pair once, from its 5' side */
(*pl)[k].i = i;
(*pl)[k].j = pt[i];
(*pl)[k].p = pr;
(*pl)[k++].type = 0;
}
}
/* append gquad entries, growing the list when it fills up */
gpl = get_plist_gquad_from_db(struc, pr);
for(ptr = gpl; ptr->i != 0; ptr++){
if (k == n * size - 1){
n *= 2;
*pl = (plist *)xrealloc(*pl, n * size * sizeof(plist));
}
(*pl)[k].i = ptr->i;
(*pl)[k].j = ptr->j;
(*pl)[k].p = ptr->p;
(*pl)[k++].type = ptr->type;
}
free(gpl);
/* terminating all-zero sentinel entry */
(*pl)[k].i = 0;
(*pl)[k].j = 0;
(*pl)[k].p = 0.;
(*pl)[k++].type = 0.;
free(pt);
/* shrink the list to the number of entries actually used */
*pl = (plist *)xrealloc(*pl, k * sizeof(plist));
}
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
/* Deprecated: use E_Hairpin() instead.
   Energy of a hairpin loop of the given size closed by a pair of the given
   type; si1/sj1 are the encoded mismatch bases and string is the loop
   sequence (needed for the tri-/tetra-/hexaloop bonus lookup).
   NOTE(review): with tetra_loop disabled, size==3 loops fall through to the
   mismatchH term below instead of the TerminalAU-only handling — preserved
   historical behavior of this deprecated entry point; confirm before reuse. */
PUBLIC int HairpinE(int size, int type, int si1, int sj1, const char *string) {
int energy;
energy = (size <= 30) ? P->hairpin[size] :
P->hairpin[30]+(int)(P->lxc*log((size)/30.));
if (tetra_loop){
if (size == 4) { /* check for tetraloop bonus */
char tl[7]={0}, *ts;
strncpy(tl, string, 6);
if ((ts=strstr(P->Tetraloops, tl)))
return (P->Tetraloop_E[(ts - P->Tetraloops)/7]);
}
if (size == 6) {
char tl[9]={0}, *ts;
strncpy(tl, string, 8);
if ((ts=strstr(P->Hexaloops, tl)))
return (energy = P->Hexaloop_E[(ts - P->Hexaloops)/9]);
}
if (size == 3) {
char tl[6]={0,0,0,0,0,0}, *ts;
strncpy(tl, string, 5);
if ((ts=strstr(P->Triloops, tl))) {
return (P->Triloop_E[(ts - P->Triloops)/6]);
}
/* triloops get no mismatch term, only the closing-pair penalty */
if (type>2) /* neither CG nor GC */
energy += P->TerminalAU; /* penalty for closing AU GU pair — IVO:
is this a bonus or a penalty now (sign?) */
return energy;
}
}
energy += P->mismatchH[type][si1][sj1];
return energy;
}
/*---------------------------------------------------------------------------*/
/* Deprecated: use E_IntLoop() instead.
   Energy of a degree-2 loop (stack, bulge or interior loop) closed by pairs
   (i,j) and (p,q) of the given types; mismatch bases are read from the
   global S1 encoding. */
PUBLIC int oldLoopEnergy(int i, int j, int p, int q, int type, int type_2) {
/* compute energy of degree 2 loop (stack bulge or interior) */
int n1, n2, m, energy;
n1 = p-i-1;
n2 = j-q-1;
if (n1>n2) { m=n1; n1=n2; n2=m; } /* so that n2>=n1 */
if (n2 == 0)
energy = P->stack[type][type_2]; /* stack */
else if (n1==0) { /* bulge */
/* sizes beyond 30 are extrapolated logarithmically */
energy = (n2<=MAXLOOP)?P->bulge[n2]:
(P->bulge[30]+(int)(P->lxc*log(n2/30.)));
#if STACK_BULGE1
if (n2==1) energy+=P->stack[type][type_2];
#endif
} else { /* interior loop */
if ((n1+n2==2)&&(james_rule))
/* special case for loop size 2 */
energy = P->int11[type][type_2][S1[i+1]][S1[j-1]];
else {
energy = (n1+n2<=MAXLOOP)?(P->internal_loop[n1+n2]):
(P->internal_loop[30]+(int)(P->lxc*log((n1+n2)/30.)));
/* asymmetry (Ninio) penalty, capped at MAX_NINIO */
#if NEW_NINIO
energy += MIN2(MAX_NINIO, (n2-n1)*P->ninio[2]);
#else
m = MIN2(4, n1);
energy += MIN2(MAX_NINIO,((n2-n1)*P->ninio[m]));
#endif
energy += P->mismatchI[type][S1[i+1]][S1[j-1]]+
P->mismatchI[type_2][S1[q+1]][S1[p-1]];
}
}
return energy;
}
/*--------------------------------------------------------------------------*/
/* Deprecated: use E_IntLoop() instead.
   Energy of a degree-2 loop with n1/n2 unpaired bases on either side,
   closing pair types type/type_2 and encoded mismatch bases si1,sj1 (outer
   pair) and sp1,sq1 (inner pair). Handles the tabulated special cases
   (1x1, 2x1, 1xn, 2x2, 2x3) before the generic interior-loop formula. */
PUBLIC int LoopEnergy(int n1, int n2, int type, int type_2,
int si1, int sj1, int sp1, int sq1) {
/* compute energy of degree 2 loop (stack bulge or interior) */
int nl, ns, energy;
if (n1>n2) { nl=n1; ns=n2;}
else {nl=n2; ns=n1;}
if (nl == 0)
return P->stack[type][type_2]; /* stack */
if (ns==0) { /* bulge */
/* sizes beyond 30 are extrapolated logarithmically */
energy = (nl<=MAXLOOP)?P->bulge[nl]:
(P->bulge[30]+(int)(P->lxc*log(nl/30.)));
if (nl==1) energy += P->stack[type][type_2];
else {
if (type>2) energy += P->TerminalAU;
if (type_2>2) energy += P->TerminalAU;
}
return energy;
}
else { /* interior loop */
if (ns==1) {
if (nl==1) /* 1x1 loop */
return P->int11[type][type_2][si1][sj1];
if (nl==2) { /* 2x1 loop */
/* orientation matters: pick the table matching which side is short */
if (n1==1)
energy = P->int21[type][type_2][si1][sq1][sj1];
else
energy = P->int21[type_2][type][sq1][si1][sp1];
return energy;
}
else { /* 1xn loop */
energy = (nl+1<=MAXLOOP)?(P->internal_loop[nl+1]):
(P->internal_loop[30]+(int)(P->lxc*log((nl+1)/30.)));
energy += MIN2(MAX_NINIO, (nl-ns)*P->ninio[2]);
energy += P->mismatch1nI[type][si1][sj1]+
P->mismatch1nI[type_2][sq1][sp1];
return energy;
}
}
else if (ns==2) {
if(nl==2) { /* 2x2 loop */
return P->int22[type][type_2][si1][sp1][sq1][sj1];}
else if (nl==3) { /* 2x3 loop */
energy = P->internal_loop[5]+P->ninio[2];
energy += P->mismatch23I[type][si1][sj1]+
P->mismatch23I[type_2][sq1][sp1];
return energy;
}
}
{ /* generic interior loop (no else here!)*/
energy = (n1+n2<=MAXLOOP)?(P->internal_loop[n1+n2]):
(P->internal_loop[30]+(int)(P->lxc*log((n1+n2)/30.)));
/* asymmetry (Ninio) penalty, capped at MAX_NINIO */
energy += MIN2(MAX_NINIO, (nl-ns)*P->ninio[2]);
energy += P->mismatchI[type][si1][sj1]+
P->mismatchI[type_2][sq1][sp1];
}
}
return energy;
}
/* Deprecated multi-/exterior-loop scorer, kept mainly for the dangles==3
   (coaxial stacking) model; superseded by energy_of_ml_pt() and
   energy_of_extLoop_pt(). i is the 5' base of the closing pair, or 0
   together with is_extloop!=0 for the exterior loop. */
PRIVATE int ML_Energy(int i, int is_extloop) {
/* i is the 5'-base of the closing pair (or 0 for exterior loop)
loop is scored as ML if extloop==0 else as exterior loop
since each helix can coaxially stack with at most one of its
neighbors we need an auxiliarry variable cx_energy
which contains the best energy given that the last two pairs stack.
energy holds the best energy given the previous two pairs do not
stack (i.e. the two current helices may stack)
We don't allow the last helix to stack with the first, thus we have to
walk around the Loop twice with two starting points and take the minimum
*/
int energy, cx_energy, best_energy=INF;
int i1, j, p, q, u, x, type, count;
int mlintern[NBPAIRS+1], mlclosing, mlbase;
int dangle_model = P->model_details.dangles;
if (is_extloop) {
for (x = 0; x <= NBPAIRS; x++)
mlintern[x] = P->MLintern[x]-P->MLintern[1]; /* 0 or TerminalAU */
mlclosing = mlbase = 0;
} else {
for (x = 0; x <= NBPAIRS; x++) mlintern[x] = P->MLintern[x];
mlclosing = P->MLclosing; mlbase = P->MLbase;
}
/* as we do not only have dangling end but also mismatch contributions,
** we do this a bit different to previous implementations
*/
if(is_extloop){
energy = 0;
i1 = i;
p = i+1;
int E_mm5_available, E_mm5_occupied;
/* find out if we may have 5' mismatch for the next stem */
while (p <= (int)pair_table[0] && pair_table[p]==0) p++;
/* get position of pairing partner */
if(p < (int)pair_table[0]){
E_mm5_occupied = (p - i - 1 > 0) ? INF : 0;
E_mm5_available = (p - i - 1 > 0) ? 0 : INF;
}
if(p < (int)pair_table[0])
do{
int tt;
/* p must have a pairing partner */
q = (int)pair_table[p];
/* get type of base pair (p,q) */
tt = pair[S[p]][S[q]];
if(tt==0) tt=7;
int mm5 = ((SAME_STRAND(p-1,p)) && (p>1)) ? S1[p-1]: -1;
int mm3 = ((SAME_STRAND(q,q+1)) && (q<(unsigned int)pair_table[0])) ? S1[q+1]: -1;
switch(dangle_model){
/* dangle_model == 0 */
case 0: energy += E_ExtLoop(tt, -1, -1, P);
break;
/* dangle_model == 1: 2-state minimization, each unpaired base may
serve only one adjacent stem */
case 1: {
/* check for unpaired nucleotide 3' to the current stem */
int u3 = ((q < pair_table[0]) && (pair_table[q+1] == 0)) ? 1 : 0;
if(pair_table[p-1] != 0) mm5 = -1;
if(!u3){
mm3 = -1;
E_mm5_occupied = MIN2(
E_mm5_occupied + E_ExtLoop(tt, -1, -1, P),
E_mm5_available + E_ExtLoop(tt, mm5, -1, P)
);
E_mm5_available = E_mm5_occupied;
}
else{
E_mm5_occupied = MIN2(
E_mm5_occupied + E_ExtLoop(tt, -1, mm3, P),
E_mm5_available + E_ExtLoop(tt, mm5, mm3, P)
);
E_mm5_available = MIN2(
E_mm5_occupied + E_ExtLoop(tt, -1, -1, P),
E_mm5_available + E_ExtLoop(tt, mm5, -1, P)
);
}
}
break;
/* the beloved case dangle_model == 2 */
case 2: energy += E_ExtLoop(tt, mm5, mm3, P);
break;
/* dangle_model == 3 a.k.a. helix stacking */
case 3: break;
} /* end switch dangle_model */
/* seek to the next stem */
p = q + 1;
while (p <= (int)pair_table[0] && pair_table[p]==0) p++;
if(p == (int)pair_table[0] + 1){
if(dangle_model == 1)
energy = (p > q + 1) ? E_mm5_occupied : E_mm5_available;
q = 0;
break;
}
} while(q != i);
}
/* not exterior loop */
else{
for (count=0; count<2; count++) { /* do it twice */
int ld5 = 0; /* 5' dangle energy on prev pair (type) */
if ( i==0 ) {
j = (unsigned int)pair_table[0]+1;
type = 0; /* no pair */
}
else {
j = (unsigned int)pair_table[i];
type = pair[S[j]][S[i]]; if (type==0) type=7;
if (dangle_model==3) { /* prime the ld5 variable */
if (SAME_STRAND(j-1,j)) {
ld5 = P->dangle5[type][S1[j-1]];
if ((p=(unsigned int)pair_table[j-2]) && SAME_STRAND(j-2, j-1))
if (P->dangle3[pair[S[p]][S[j-2]]][S1[j-1]]<ld5) ld5 = 0;
}
}
}
i1=i; p = i+1; u=0;
energy = 0; cx_energy=INF;
do { /* walk around the multi-loop */
int tt, new_cx = INF;
/* hop over unpaired positions */
while (p <= (unsigned int)pair_table[0] && pair_table[p]==0) p++;
/* memorize number of unpaired positions */
u += p-i1-1;
/* get position of pairing partner */
if ( p == (unsigned int)pair_table[0]+1 ){
q = 0;tt = 0; /* virtual root pair */
} else {
q = (unsigned int)pair_table[p];
/* get type of base pair P->q */
tt = pair[S[p]][S[q]]; if (tt==0) tt=7;
}
energy += mlintern[tt];
cx_energy += mlintern[tt];
if (dangle_model) {
int dang5=0, dang3=0, dang;
if ((SAME_STRAND(p-1,p))&&(p>1))
dang5=P->dangle5[tt][S1[p-1]]; /* 5'dangle of pq pair */
if ((SAME_STRAND(i1,i1+1))&&(i1<(unsigned int)S[0]))
dang3 = P->dangle3[type][S1[i1+1]]; /* 3'dangle of previous pair */
switch (p-i1-1) {
case 0: /* adjacent helices */
if (dangle_model==2)
energy += dang3+dang5;
else if (dangle_model==3 && i1!=0) {
if (SAME_STRAND(i1,p)) {
/* coaxial stack of the two adjacent helices */
new_cx = energy + P->stack[rtype[type]][rtype[tt]];
/* subtract 5'dangle and TerminalAU penalty */
new_cx += -ld5 - mlintern[tt]-mlintern[type]+2*mlintern[1];
}
ld5=0;
energy = MIN2(energy, cx_energy);
}
break;
case 1: /* 1 unpaired base between helices */
dang = (dangle_model==2)?(dang3+dang5):MIN2(dang3, dang5);
if (dangle_model==3) {
energy = energy +dang; ld5 = dang - dang3;
/* may be problem here: Suppose
cx_energy>energy, cx_energy+dang5<energy
and the following helices are also stacked (i.e.
we'll subtract the dang5 again */
if (cx_energy+dang5 < energy) {
energy = cx_energy+dang5;
ld5 = dang5;
}
new_cx = INF; /* no coax stacking with mismatch for now */
} else
energy += dang;
break;
default: /* many unpaired base between helices */
energy += dang5 +dang3;
if (dangle_model==3) {
energy = MIN2(energy, cx_energy + dang5);
new_cx = INF; /* no coax stacking possible */
ld5 = dang5;
}
}
type = tt;
}
if (dangle_model==3) cx_energy = new_cx;
i1 = q; p=q+1;
} while (q!=i);
best_energy = MIN2(energy, best_energy); /* don't use cx_energy here */
/* fprintf(stderr, "%6.2d\t", energy); */
if (dangle_model!=3 || is_extloop) break; /* may break cofold with co-ax */
/* skip a helix and start again */
while (pair_table[p]==0) p++;
if (i == (unsigned int)pair_table[p]) break;
i = (unsigned int)pair_table[p];
}
energy = best_energy;
energy += mlclosing;
/* logarithmic ML loop energy if logML */
if ( (!is_extloop) && logML && (u>6) )
energy += 6*mlbase+(int)(P->lxc*log((double)u/6.));
else
energy += mlbase*u;
/* fprintf(stderr, "\n"); */
}
return energy;
}
/* Deprecated no-op kept for API compatibility: memory for folding is now
   managed elsewhere, so nothing depends on the sequence length here. */
PUBLIC void initialize_fold(int length)
{
(void)length; /* unused */
}
/* Deprecated wrapper: evaluate a structure's energy using the global
   eos_debug verbosity setting. */
PUBLIC float energy_of_struct(const char *string, const char *structure)
{
float e = energy_of_structure(string, structure, eos_debug);
return e;
}
/* Deprecated wrapper: pair-table energy evaluation with the global
   eos_debug verbosity setting. */
PUBLIC int energy_of_struct_pt(const char *string, short * ptable, short *s, short *s1)
{
int e = energy_of_structure_pt(string, ptable, s, s1, eos_debug);
return e;
}
/* Deprecated wrapper: circular-structure energy evaluation with the global
   eos_debug verbosity setting. */
PUBLIC float energy_of_circ_struct(const char *string, const char *structure)
{
float e = energy_of_circ_structure(string, structure, eos_debug);
return e;
}
|
GB_binop__cmplx_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp32)
// C=scalar+B GB (_bind1st__cmplx_fp32)
// C=scalar+B' GB (_bind1st_tran__cmplx_fp32)
// C=A+scalar GB (_bind2nd__cmplx_fp32)
// C=A'+scalar GB (_bind2nd_tran__cmplx_fp32)
// C type: GxB_FC32_t
// A type: float
// B,b type: float
// BinaryOp: cij = GxB_CMPLXF (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GxB_CMPLXF (GBX (Ax, pA, A_iso), 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GxB_CMPLXF (GBX (Bx, pB, B_iso), 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLXF (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse B into a dense C.  The template body is
// compiled out (#if 0) for this operator, so this is effectively a no-op
// that reports success (generated code; kept as-is).
GrB_Info GB (_Cdense_accumB__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense C.  The template body is
// compiled out (#if 0) for this operator, so this is effectively a no-op
// that reports success (generated code; kept as-is).
GrB_Info GB (_Cdense_accumb__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M); the work is done by the
// included template, which allocates the *_ek_slicing workspaces declared
// below and frees them via GB_FREE_WORK.
GrB_Info GB (_AaddB__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// task-slicing workspaces used (and freed) inside the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked); delegated to the 01 meta
// template, specialized by the GB_* macros above.
GrB_Info GB (_AemultB_01__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  GxB_CMPLXF(x,y) is not commutative (real vs imaginary
// part), so GB_BINOP_FLIP is 1 and the flipped case is compiled
// explicitly via GB_FLIPPED.
GrB_Info GB (_AemultB_02__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hypersparse and both A and B
// are bitmap/full; delegated to the 03 template.
GrB_Info GB (_AemultB_03__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C (C=A.*B, C<M>=A.*B, or C<!M>=A.*B);
// delegated to the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__cmplx_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLXF (x, Bx [p]): the bound scalar x becomes the real
// part and each entry of B the imaginary part.  Entries absent from the
// bitmap Bb are skipped; the loop parallelizes over the bnz entries.
GrB_Info GB (_bind1st__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = GxB_CMPLXF (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLXF (Ax [p], y): each entry of A becomes the real part
// and the bound scalar y the imaginary part.  Entries absent from the
// bitmap Ab are skipped; the loop parallelizes over the anz entries.
GrB_Info GB (_bind2nd__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = GxB_CMPLXF (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry operation used by the transpose
// template below: cij = CMPLXF (bound scalar x, aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (x, aij) ; \
}
// C = op (x, A'): transpose A while applying the operator with the
// scalar bound to the first argument.
GrB_Info GB (_bind1st_tran__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file (a no-op here, since the
// A and B types are both float for this operator; generated code)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry operation used by the transpose
// template below: cij = CMPLXF (aij, bound scalar y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (aij, y) ; \
}
// C = op (A', y): transpose A while applying the operator with the
// scalar bound to the second argument.
GrB_Info GB (_bind2nd_tran__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
APOP.h | /*
* APOP.h
*
* Created on: Jul 20, 2016
* Author: mason
*/
#ifndef APOP_H_
#define APOP_H_
#include "MyLib.h"
#include "Alphabet.h"
#include "Node.h"
#include "Graph.h"
#include "APParam.h"
// for sparse features
// Parameter container for the averaged-perceptron style sparse feature
// layer: owns the weight matrix W and maps feature strings to column
// indices via the shared alphabet `elems`.
struct APParams {
public:
APParam W;          // weight matrix, nDim x nVSize
PAlphabet elems;    // feature-string -> id alphabet (shared, not owned)
int nVSize;         // number of feature columns (capacity of the alphabet)
int nDim;           // output dimension
public:
APParams() {
nVSize = 0;
nDim = 0;
elems = NULL;
}
// Register trainable parameters with the optimizer.
inline void exportAdaParams(ModelUpdate& ada) {
ada.addParam(&W);
}
// Allocate W once the vocabulary size is known; nOSize is the output
// dimension.  Bails out (with a console warning) if the alphabet was
// never initialized.
inline void initialWeights(int nOSize) {
if (nVSize == 0) {
std::cout << "please check the alphabet" << std::endl;
return;
}
nDim = nOSize;
W.initial(nOSize, nVSize);
}
//random initialization
// base > 1 reserves extra capacity beyond the current alphabet size and
// re-opens the alphabet so new features can still be collected.
inline void initial(PAlphabet alpha, int nOSize, int base = 1) {
assert(base >= 1);
elems = alpha;
nVSize = base * elems->size();
if (base > 1) {
std::cout << "nVSize: " << nVSize << ", Alpha Size = " << elems->size() << ", Require more Alpha."<< std::endl;
elems->set_fixed_flag(false);
}
initialWeights(nOSize);
}
// Look up (or add) a feature string; freezes the alphabet once it
// reaches capacity so nVSize is never exceeded.  Returns a negative
// index for unknown features once frozen (per Alphabet::from_string).
inline int getFeatureId(const string& strFeat) {
int idx = elems->from_string(strFeat);
if(!elems->m_b_fixed && elems->m_size >= nVSize) {
std::cout << "AP Alphabet stopped collecting features" << std::endl;
elems->set_fixed_flag(true);
}
return idx;
}
};
//only implemented sparse linear node.
//non-linear transformations are not support,
// Graph node computing a sparse linear transform: val = W[:, ins] summed
// over the active feature ids in `ins`.  Non-linear activations are not
// supported here.
class APNode : public Node {
public:
APParams* param;   // shared parameters (not owned)
vector<int> ins;   // active feature column indices for this example
bool bTrain;       // whether the enclosing graph is in training mode
public:
APNode() : Node() {
ins.clear();
param = NULL;
node_type = "apnode";
}
inline void setParam(APParams* paramInit) {
param = paramInit;
}
// Reset per-example state so the node can be reused across examples.
inline void clearValue() {
Node::clearValue();
ins.clear();
bTrain = false;
}
public:
//notice the output
// Convert feature strings to ids (dropping unknown features, which map
// to negative ids) and register this node with the graph.
void forward(Graph *cg, const vector<string>& x) {
int featId;
int featSize = x.size();
for (int idx = 0; idx < featSize; idx++) {
featId = param->getFeatureId(x[idx]);
if (featId >= 0) {
ins.push_back(featId);
}
}
degree = 0;
cg->addNode(this);
bTrain = cg->train;
}
public:
// val = sum of the weight columns selected by `ins`.
inline void compute() {
param->W.value(ins, val, bTrain);
}
//no output losses
// Propagate this node's loss into the gradients of the selected columns.
void backward() {
//assert(param != NULL);
param->W.loss(ins, loss);
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
// Two APNodes can share an executor only if they share parameters.
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
APNode* conv_other = (APNode*)other;
if (param != conv_other->param) {
return false;
}
return true;
}
};
// Batch executor for APNode: runs compute/backward plus dropout for every
// node in the batch.  The omp pragmas are intentionally disabled (the
// per-node work is too small to amortize thread startup — TODO confirm).
class APExecute :public Execute {
public:
bool bTrain;   // training mode flag forwarded to forward_drop
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
// Create a single-node executor for this APNode.  Ownership of the raw
// pointer transfers to the caller (the graph framework is expected to
// delete it after execution — convention of this codebase).
inline PExecute APNode::generate(bool bTrain, dtype cur_drop_factor) {
APExecute* exec = new APExecute();
exec->batch.push_back(this);
exec->bTrain = bTrain;
exec->drop_factor = cur_drop_factor;
return exec;
}
#endif /* APOP_H_ */
|
parallel_measurement.c | /*
Calculating the value of pi using reduction : Parallel Implementation
Author : Omkar Damle.
Date : August 2016.
*/
#include<stdio.h>
#include<math.h>
#include<omp.h>
#include<time.h>
#include<string.h>
#include<stdlib.h>
// Using the MONOTONIC clock
#define CLK CLOCK_MONOTONIC
/* Function to compute the difference between two points in time */
struct timespec diff(struct timespec start, struct timespec end);
/*
Function to computes the difference between two time instances
Taken from - http://www.guyrutenberg.com/2007/09/22/profiling-code-using-clock_gettime/
Further reading:
http://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance
http://stackoverflow.com/questions/3523442/difference-between-clock-realtime-and-clock-monotonic
*/
/*
 * Return end - start as a normalized struct timespec, i.e. with
 * tv_nsec in [0, 1e9).  Assumes end >= start.
 */
struct timespec diff(struct timespec start, struct timespec end){
    struct timespec delta;
    long nsec = end.tv_nsec - start.tv_nsec;
    delta.tv_sec = end.tv_sec - start.tv_sec;
    if (nsec < 0) {
        /* borrow one second from the seconds field */
        delta.tv_sec -= 1;
        nsec += 1000000000L;
    }
    delta.tv_nsec = nsec;
    return delta;
}
/*
 * Times a naive O(n^3) dense matrix multiplication (all-ones inputs)
 * parallelized with OpenMP and prints CSV timing results.
 * Usage: ./prog n p   (n = matrix order, p = number of threads)
 * Returns 0 on success, -1 on bad arguments or allocation failure.
 */
int main(int argc, char* argv[])
{
    struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg;

    /* Should start before anything else */
    clock_gettime(CLK, &start_e2e);

    /* Check if enough command-line arguments are taken in. */
    if (argc < 3) {
        printf("Usage: %s n p \n", argv[0]);
        return -1;
    }

    int n = atoi(argv[1]); /* size of input array */
    int p = atoi(argv[2]); /* number of processors */
    if (n <= 0 || p <= 0) {
        fprintf(stderr, "n and p must be positive integers\n");
        return -1;
    }

    char *problem_name = "matrix_multiplication";
    char *approach_name = "omp_parallel";
    FILE *outputFile;
    char outputFileName[64];
    /* snprintf instead of sprintf: argv strings are untrusted lengths */
    snprintf(outputFileName, sizeof outputFileName,
             "output/%s_%s_%s_%s_output.txt",
             problem_name, approach_name, argv[1], argv[2]);

    int *a[n], *b[n], *c[n];
    int i, j, k;

    /* Allocate and fill: a = b = all ones, c = zeros. */
    for (i = 0; i < n; i++) {
        a[i] = (int *) malloc(n * sizeof(int));
        b[i] = (int *) malloc(n * sizeof(int));
        c[i] = (int *) malloc(n * sizeof(int));
        if (!a[i] || !b[i] || !c[i]) {
            fprintf(stderr, "allocation failed\n");
            return -1;
        }
        for (j = 0; j < n; j++) {
            a[i][j] = 1;
            b[i][j] = 1;
            c[i][j] = 0;
        }
    }

    clock_gettime(CLK, &start_alg); /* Start the algo timer */
    /*----------------------Core algorithm starts here--------------------*/
    omp_set_num_threads(p);
    /* BUG FIX: the original used an orphaned "#pragma omp for" with no
       enclosing parallel region, which binds to a team of one and runs
       sequentially.  A combined parallel-for on the outer loop gives the
       intended parallelism; i is implicitly private, j and k must be
       declared private since they live outside the loop. */
    #pragma omp parallel for private(j, k)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            for (k = 0; k < n; k++) {
                c[i][j] += a[i][k] * b[k][j];
            }
        }
    }
    /*----------------------Core algorithm finished-----------------------*/
    clock_gettime(CLK, &end_alg); /* End the algo timer */

    /* Should end before anything else (printing comes later) */
    clock_gettime(CLK, &end_e2e);
    e2e = diff(start_e2e, end_e2e);
    alg = diff(start_alg, end_alg);

    /* Create (and close) the per-run output file; a missing output/
       directory is not fatal for the timing run itself. */
    outputFile = fopen(outputFileName, "w");
    if (outputFile != NULL)
        fclose(outputFile);

    /* problem_name,approach_name,n,p,e2e_sec,e2e_nsec,alg_sec,alg_nsec.
       tv_sec is time_t, so cast to long: the original printed it with %d,
       which is undefined behavior where time_t is wider than int. */
    printf("%s,%s,%d,%d,%ld,%ld,%ld,%ld\n", problem_name, approach_name,
           n, p, (long) e2e.tv_sec, e2e.tv_nsec,
           (long) alg.tv_sec, alg.tv_nsec);

    /* Release the matrices (the original leaked all three). */
    for (i = 0; i < n; i++) {
        free(a[i]);
        free(b[i]);
        free(c[i]);
    }
    return 0;
}
|
IrvingKirkwood.h | #ifndef __IRVING_KIRKWOOD_H__
#define __IRVING_KIRKWOOD_H__
#include <pyglasstools/Calculator.h>
#include <omp.h>
#include "cgfunc/CoarseGrainFunction.h"
// Irving-Kirkwood calculator: evaluates coarse-grained fields (density,
// virial stress, ...) on a set of grid points from a particle system and
// pair potential.
class PYBIND11_EXPORT IrvingKirkwood : public Calculator
{
    public:
        IrvingKirkwood( std::shared_ptr< ParticleSystem > sysdata,
                        std::shared_ptr< PairPotential > potential,
                        std::shared_ptr< CoarseGrainFunction > cgfunc,
                        std::shared_ptr< MPI::Communicator > comm )
            : Calculator(sysdata,potential), m_cgfunc(cgfunc), m_comm(comm)
        {
        };
        ~IrvingKirkwood(){};
        // Evaluate all registered observables on every grid point.
        void compute(const std::vector< Eigen::Vector3d >& gridpoints);
        // Register an observable field, keyed by its name.
        virtual void addObservable(const std::shared_ptr<CoarseGrainedField>& obs)
        {
            m_observables.insert(std::pair<std::string, std::shared_ptr<CoarseGrainedField> >(obs->name, obs));
        }
        // Debug helper: dump per-particle displacements to the Python console.
        virtual void printDisplacement()
        {
            for( auto p_i = m_sysdata->particles.begin(); p_i != m_sysdata->particles.end(); ++p_i)
            {
                int id = abr::get<abr::id>(*p_i);
                py::print(abr::get<displacement>(*p_i)[0],abr::get<displacement>(*p_i)[1],id);
                py::print("WHY",abr::get<displacement>(m_sysdata->particles[id])[0],abr::get<displacement>(m_sysdata->particles[id])[1],abr::get<abr::id>(*p_i));
            }
        }
        // Zero the accumulators of every observable at one grid point.
        virtual void clearState(unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
                it->second->clear(grid_id);
        }
        // Accumulate single-particle (local) observables at one grid point,
        // weighted by the coarse-graining delta-function value cgval.
        virtual void computeLocalObsPerGrid(const AboriaParticles::value_type& particle_i,
                                            double cgval, unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
            {
                if (it->second->islocal)
                    it->second->accumulate(particle_i,cgval,grid_id);
                else
                    continue;
            }
        }
        // Accumulate pair (bond) observables, e.g. virial stress, weighted by
        // the coarse-graining bond-function value bondval.
        virtual void computePairObsPerGrid( const AboriaParticles::value_type& particle_i,
                                            const AboriaParticles::value_type& particle_j,
                                            Eigen::Vector3d rij,
                                            double bondval, unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
            {
                if (!it->second->islocal)
                    it->second->accumulate(particle_i,particle_j, rij, m_potential, bondval, grid_id);
                else
                    continue;
            }
        }
    private:
        std::shared_ptr< CoarseGrainFunction > m_cgfunc; //!< particle system, equipped with neighbor list
        std::shared_ptr< MPI::Communicator > m_comm;
        // name -> observable; each observable stores per-grid-point state
        std::map< std::string, std::shared_ptr< CoarseGrainedField > > m_observables;
};
//Compute a Global Observable
//Compute a Global Observable
// For every grid point: clear accumulators, find particles within the
// coarse-graining cutoff, accumulate local observables, then loop over
// neighbor pairs for bond observables.  Grid points are independent
// (each accumulates into its own grid_id slot), which is what makes the
// omp parallel-for safe — assuming accumulate() touches only per-grid
// state; TODO confirm.
void IrvingKirkwood::compute(const std::vector< Eigen::Vector3d >& gridpoints)
{
    #pragma omp parallel for
    for (unsigned int i = 0; i < gridpoints.size(); ++i)
    {
        clearState(i);
        for( auto p_i = abr::euclidean_search(m_sysdata->particles.get_query(),
                        abr::vdouble3(gridpoints[i][0],gridpoints[i][1],gridpoints[i][2]), m_cgfunc->getRcut());
             p_i != false; ++p_i)
        {
            //Set grid point X and position of particle ri
            // p_i.dx() is the (periodic-image-corrected) vector from the
            // particle to the search point, so ri = x - dr.
            Eigen::Vector3d dr(p_i.dx()[0], p_i.dx()[1],p_i.dx()[2]);
            Eigen::Vector3d x = gridpoints[i];
            Eigen::Vector3d ri = gridpoints[i]-dr;
            double cgval = m_cgfunc->getDeltaFunc(x,ri);
            computeLocalObsPerGrid(*p_i, cgval, i);
            //Compute a list of local observables
            //Next we loop through the j-th particles for the virial stress
            // max_rcut is presumably the potential's maximum pair cutoff,
            // inherited from the Calculator base class — confirm there.
            for( auto p_j = abr::euclidean_search(m_sysdata->particles.get_query(),
                            abr::get<position>(*p_i), max_rcut);
                 p_j != false; ++p_j)
            {
                //Make sure the particle is unique
                if (abr::get<abr::id>(*p_i) != abr::get<abr::id>(*p_j))
                {
                    //set the distance between particle i and particle j
                    Eigen::Vector3d rij(-p_j.dx()[0], -p_j.dx()[1], -p_j.dx()[2]);
                    double bondval = m_cgfunc->getBondFunc(x,ri,rij);
                    //Don't forget to set diameters of the potential
                    computePairObsPerGrid(*p_i, *p_j, rij, bondval, i);
                }
            }
        }
    }
};
// Expose IrvingKirkwood to Python via pybind11 (inherits the Calculator
// bindings; held by shared_ptr).
void export_IrvingKirkwood(py::module& m)
{
    py::class_<IrvingKirkwood, Calculator, std::shared_ptr<IrvingKirkwood> >(m,"IrvingKirkwood")
    .def(py::init< std::shared_ptr< ParticleSystem >, std::shared_ptr< PairPotential >, std::shared_ptr< CoarseGrainFunction >, std::shared_ptr< MPI::Communicator > >())
    .def("compute", &IrvingKirkwood::compute)
    .def("setSystemData", &IrvingKirkwood::setSystemData)
    .def("addObservable", &IrvingKirkwood::addObservable)
    .def("printDisplacement", &IrvingKirkwood::printDisplacement)
    ;
};
#endif
|
geo_yeefdtd.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "geo_yeefdtd.kernel_inc.h"
/* Generated boilerplate accessors for the GEO_YEE_CURL_L kernel. */
/* init: no per-kernel state to set up; always returns 0. */
int openmp_GEO_YEE_CURL_L_init (openmp_pscmc_env * pe ,openmp_GEO_YEE_CURL_L_struct * kerstr ){
return 0 ;}
/* Report the size of the kernel-argument struct through *len. */
void openmp_GEO_YEE_CURL_L_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_GEO_YEE_CURL_L_struct ));
}
/* One compute unit per available OpenMP thread. */
int openmp_GEO_YEE_CURL_L_get_num_compute_units (openmp_GEO_YEE_CURL_L_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension length constant baked in at generation time. */
int openmp_GEO_YEE_CURL_L_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Run the Yee-FDTD curl kernel over the whole grid with an OpenMP team.
 * Rows (yid) are dealt to threads cyclically (yid = tid, tid+numt, ...);
 * each thread walks every x index for its rows.  Always returns 0.
 *
 * NOTE(review): xid is never forwarded to the kernel call, so each x
 * iteration invokes it with identical arguments — presumably the kernel
 * derives x internally or the caller passes xlen == 1; preserved exactly
 * as generated.  TODO confirm against the generator.
 *
 * Cleanup vs. the generated original: the unused locals ysingle/ymin/ymax
 * (left over from a block-distribution variant) are removed.
 */
int openmp_GEO_YEE_CURL_L_exec (openmp_GEO_YEE_CURL_L_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid = yid + numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid = xid + 1)
            {
                openmp_GEO_YEE_CURL_L_scmc_kernel ( ( kerstr )->inoutE1 , ( kerstr )->inB0 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->x0)[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inoutE1 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inoutE1 = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inB0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inB0 = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_xoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xoffset = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yoffset = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zoffset = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_y_cpu_core (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_numvec (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_XLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_YLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ZLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ovlp (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_xblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_num_ele (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DT (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DT = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Z (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Y (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_X (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_GEO_YEE_CURL_L_scmc_set_parameter_x0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->x0 = pm->d_data);
}
|
6198.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Array initialization. */
/* Fill the m x n data matrix with the deterministic pattern i*j/M and set
 * the normalization constant *float_n (fixed at 1.2 by the benchmark). */
static
void init_array (int m,
   int n,
   DATA_TYPE *float_n,
   DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int i, j;
  *float_n = 1.2;
  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
/* Dump the m x m correlation matrix to stderr, 20 values per line. */
static
void print_array(int m,
   DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int i, j;
  for (i = 0; i < m; i++)
    for (j = 0; j < m; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
      if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* Pearson correlation: compute per-column means and standard deviations,
 * standardize the data, then build the m x m correlation matrix.
 *
 * NOTE(review): `#P11` in the num_threads clause is an unexpanded
 * autotuning-template placeholder — this file will not compile until it is
 * substituted with an integer expression.
 *
 * NOTE(review): the parallel region below contains no worksharing
 * constructs, so every thread executes all four loop nests and writes the
 * same locations (mean, stddev, data, symmat) concurrently.  The values
 * written are identical, but it is still a data race (UB) and redundant
 * work — likely the template intended omp-for directives on the loops.
 * Preserved as-is pending confirmation against the template generator. */
static
void kernel_correlation(int m, int n,
   DATA_TYPE float_n,
   DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
   DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
   DATA_TYPE POLYBENCH_1D(mean,M,m),
   DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;
  DATA_TYPE eps = 0.1f;
  #define sqrt_of_array_cell(x,j) sqrt(x[j])
  #pragma scop
  /* Determine mean of column vectors of input data matrix */
  #pragma omp parallel private(i, j, j2) num_threads(#P11)
  {
  for (j = 0; j < _PB_M; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
    }
  /* Determine standard deviations of column vectors of data matrix. */
  for (j = 0; j < _PB_M; j++)
    {
      stddev[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
      stddev[j] /= float_n;
      stddev[j] = sqrt_of_array_cell(stddev, j);
      /* The following in an inelegant but usual way to handle
         near-zero std. dev. values, which below would cause a zero-
         divide. */
      stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
    }
  /* Center and reduce the column vectors. */
  for (i = 0; i < _PB_N; i++)
    {
      for (j = 0; j < _PB_M; j++)
        {
          data[i][j] -= mean[j];
          data[i][j] /= sqrt(float_n) * stddev[j];
        }
    }
  /* Calculate the m * m correlation matrix. */
  /* symmat is symmetric; only the upper triangle is computed, then
     mirrored into the lower triangle. */
  for (j1 = 0; j1 < _PB_M-1; j1++)
    {
      symmat[j1][j1] = 1.0;
      for (j2 = j1+1; j2 < _PB_M; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < _PB_N; i++)
            symmat[j1][j2] += (data[i][j1] * data[i][j2]);
          symmat[j2][j1] = symmat[j1][j2];
        }
    }
  }
  #pragma endscop
  /* the last diagonal entry is never touched by the triangular loop */
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Benchmark driver: allocate, initialize, time the correlation kernel,
 * and print the result via polybench's dead-code-elimination guard. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;
  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_correlation (m, n, float_n,
        POLYBENCH_ARRAY(data),
        POLYBENCH_ARRAY(symmat),
        POLYBENCH_ARRAY(mean),
        POLYBENCH_ARRAY(stddev));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  POLYBENCH_FREE_ARRAY(stddev);
  return 0;
}
|
util.h | #ifndef CORE_UTIL_H_
#define CORE_UTIL_H_
#include <math.h>
#include <omp.h>
#include <cstdio>
// For GCC
#ifndef __host__
#define __host__
#endif
#ifndef __device__
#define __device__
#endif
// From
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
// With help from https://stackoverflow.com/a/39287554/3427580
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
/* Software double-precision atomicAdd for devices of compute capability
 * < 6.0, emulated with a 64-bit compare-and-swap loop on the bit pattern
 * of the double (the standard CUDA programming-guide recipe).
 * Returns the value observed at *address before this thread's update. */
static __inline__ __device__ double atomicAdd(double *address, double val) {
  unsigned long long int *address_as_ull = (unsigned long long int *)address;
  unsigned long long int old = *address_as_ull, assumed;
  /* Fast path: adding 0.0 cannot change the stored value, so skip the
   * CAS loop entirely.  NOTE(review): this also skips -0.0 updates --
   * presumed irrelevant here; confirm if signed zeros ever matter. */
  if (val == 0.0) {
    return __longlong_as_double(old);
  }
  do {
    assumed = old;
    /* Retry until no other thread modified *address between our read
     * and the compare-and-swap. */
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif
/* Atomically add val to *address from either host or device code.
 * CUDA builds use the hardware (or emulated, see above) atomicAdd;
 * host builds rely on an OpenMP atomic update. */
template <typename T>
__host__ __device__ inline void atomic_add(T *address, T val) {
#ifdef __CUDACC__  // CUDA build: device atomic intrinsic
  atomicAdd(address, val);
#else  // host build: OpenMP atomic read-modify-write
#pragma omp atomic
  *address += val;
#endif
}
/* Maximum of two values, callable from both host and device code.
 * Implemented as a plain comparison so the header no longer depends on
 * std::max (the previous code used std::max without including
 * <algorithm>, relying on a fragile transitive include).  When the
 * operands compare equal the first argument is returned, matching
 * std::max's documented behavior. */
template <typename T>
__host__ __device__ inline T host_device_max(const T val1, const T val2) {
  return val1 < val2 ? val2 : val1;
}
/* Floating-point non-negative modulo: maps fmod's symmetric remainder
 * into [0, rval) for positive rval. */
template <typename T>
__host__ __device__ inline const T fnegmod(const T lval, const T rval) {
  const T sym = fmod(lval, rval);  // symmetric remainder, carries lval's sign
  return fmod(sym + rval, rval);   // shift negatives into range
}
/* Integer non-negative modulo: for positive rval the result lies in
 * [0, rval) even when lval is negative (C's % is symmetric). */
__host__ __device__ inline int64_t negmod(const int64_t lval,
                                          const int64_t rval) {
  const int64_t shifted = (lval % rval) + rval;
  return shifted % rval;
}
/* Split a flat row-major index into (y, x) image coordinates. */
__host__ __device__ inline void IndToSubHW(const int64_t ind,
                                           const int64_t width, int64_t &y,
                                           int64_t &x) {
  const int64_t row = ind / width;
  x = ind - row * width;  // identical to ind % width under truncating division
  y = row;
}
/* Flatten (y, x) image coordinates into a row-major index. */
__host__ __device__ inline int64_t SubToIndHW(const int64_t y, const int64_t x,
                                              const int64_t width) {
  return x + width * y;
}
/* Split a flat CHW (channel-major) index into (c, y, x). */
__host__ __device__ inline void IndToSubCHW(const int64_t ind,
                                            const int64_t height,
                                            const int64_t width, int64_t &c,
                                            int64_t &y, int64_t &x) {
  int64_t rest = ind;
  x = rest % width;
  rest /= width;
  y = rest % height;
  c = rest / height;
}
/* Flatten (c, y, x) into a CHW (channel-major) index. */
__host__ __device__ inline int64_t SubToIndCHW(const int64_t c, const int64_t y,
                                               const int64_t x,
                                               const int64_t height,
                                               const int64_t width) {
  return (c * height + y) * width + x;  // == c*height*width + y*width + x
}
/* Split a flat NCHW (batch-major) index into (n, c, y, x). */
__host__ __device__ inline void IndToSubNCHW(
    const int64_t ind, const int64_t channels, const int64_t height,
    const int64_t width, int64_t &n, int64_t &c, int64_t &y, int64_t &x) {
  int64_t rest = ind;
  x = rest % width;
  rest /= width;
  y = rest % height;
  rest /= height;
  c = rest % channels;
  n = rest / channels;
}
/* Flatten (n, c, y, x) into an NCHW (batch-major) index. */
__host__ __device__ inline int64_t SubToIndNCHW(
    const int64_t n, const int64_t c, const int64_t y, const int64_t x,
    const int64_t channels, const int64_t height, const int64_t width) {
  return ((n * channels + c) * height + y) * width + x;
}
/* Map pixel coordinates into normalized camera coordinates:
 * subtract the principal point, then divide by the focal length. */
template <typename T>
__host__ __device__ inline void NormalizePinholeCamera(T &x, T &y, const T f,
                                                       const T cx, const T cy) {
  x -= cx;
  x /= f;
  y -= cy;
  y /= f;
}
/* Map normalized camera coordinates back into pixel coordinates:
 * scale by the focal length, then add the principal point. */
template <typename T>
__host__ __device__ inline void DenormalizePinholeCamera(T &x, T &y, const T f,
                                                         const T cx,
                                                         const T cy) {
  x = x * f + cx;
  y = y * f + cy;
}
/* Build a simple pinhole camera for an image of the given size:
 * principal point at the image centre, focal length equal to the
 * larger half-extent. */
template <typename T>
__host__ __device__ inline void CreatePinholeCamera(T &f, T &cx, T &cy,
                                                    const int64_t height,
                                                    const int64_t width) {
  cy = static_cast<T>(height - 1) / T(2);
  cx = static_cast<T>(width - 1) / T(2);
  f = host_device_max(cx, cy);
}
/* Normalize pixel coordinates using the implicit pinhole camera
 * derived from the image dimensions. */
template <typename T>
__host__ __device__ inline void NormalizeCoordinates(T &x, T &y,
                                                     const int64_t height,
                                                     const int64_t width) {
  T f, cx, cy;
  CreatePinholeCamera(f, cx, cy, height, width);
  NormalizePinholeCamera(x, y, f, cx, cy);
}
/* Denormalize camera coordinates back to pixels using the implicit
 * pinhole camera derived from the image dimensions. */
template <typename T>
__host__ __device__ inline void DenormalizeCoordinates(T &x, T &y,
                                                       const int64_t height,
                                                       const int64_t width) {
  T f, cx, cy;
  CreatePinholeCamera(f, cx, cy, height, width);
  DenormalizePinholeCamera(x, y, f, cx, cy);
}
#endif |
hermv_c_csc_n_lo_trans.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/util.h"
#include <memory.h>
/* y := beta*y + alpha * op(A) * x for a Hermitian matrix whose lower
 * triangle is stored in CSC format (conjugate-transposed variant).
 * Each column i contributes its diagonal entry plus, for every strictly
 * sub-diagonal entry (r, i): a conjugated term into y[r] and a plain
 * term into y[i] (the mirrored upper-triangle element).  Per-thread
 * scratch vectors avoid write races; they are reduced into y at the end.
 * The inner column loop is unrolled 4x. */
static alphasparse_status_t
hermv_csc_n_lo_trans_unroll(const ALPHA_Number alpha,
                            const ALPHA_SPMAT_CSC *A,
                            const ALPHA_Number *x,
                            const ALPHA_Number beta,
                            ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols; /* NOTE(review): unused; kept for symmetry with sibling kernels? */
    const ALPHA_INT num_threads = alpha_get_thread_num();
    /* Scale the output vector: y *= beta. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], y[i], beta);
    }
    /* One zero-initialized accumulator vector per thread.
     * NOTE(review): alpha_memalign results are not checked for NULL. */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }
    /* Main sweep over columns of the stored lower triangle. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT ais = A->cols_start[i];
        ALPHA_INT aie = A->cols_end[i];
        ALPHA_INT ail = aie - ais;
        /* Locate the first entry with row index >= i (at/below the diagonal). */
        ALPHA_INT start = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
        if(start < aie && A->row_indx[start] == i){
            /* Diagonal entry: y[i] += alpha * conj(A[i][i]) * x[i]
             * (alpha_mul_3c applies the conjugation). */
            ALPHA_Number tmp;
            alpha_mul_3c(tmp, alpha, A->values[start]);
            alpha_madde(y_local[tid][i], tmp, x[i]);
            start += 1;
        }
        const ALPHA_INT* A_row = &A->row_indx[ais];
        const ALPHA_Number* A_val = &A->values[ais];
        ALPHA_INT ai = start - ais;
        ALPHA_Number alpha_xi, tmp;
        /* Hoist alpha * x[i], reused for every sub-diagonal entry of column i. */
        alpha_mul(alpha_xi, alpha, x[i]);
        /* 4x unrolled body: each entry (ar, i) updates y[ar] with the
         * conjugated product (alpha_madde_2c) and y[i] with the mirrored
         * upper-triangle contribution alpha * A[ar][i] * x[ar]. */
        for(; ai < ail-3; ai+=4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde_2c(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde_2c(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde_2c(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde_2c(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        /* Remainder loop for the last 0-3 entries. */
        for(; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde_2c(y_local[tid][ar], av, alpha_xi);
            alpha_mul(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }
    /* Reduce the per-thread accumulators into y. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT col = 0; col < m; col++)
        for(ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }
    /* Release scratch memory. */
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (ONAME is expanded by the build system to the
 * precision-specific kernel name); dispatches to the unrolled
 * implementation above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return hermv_csc_n_lo_trans_unroll(alpha, A, x, beta, y);
}
|
lu.base.pluto.par.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
double L[N][N];
double U[N][N];
double A[N][N+13];
/* Dump the (rounded) result matrix A to stderr, inserting a line break
 * after every 80th value and after each row. */
void print_array()
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            fprintf(stderr, "%lf ", round(A[row][col]));
            if (col % 80 == 79) fprintf(stderr, "\n");
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
/* Initialize A = L * U where L is lower-triangular and U is
 * upper-triangular with positive entries; this construction guarantees
 * nonzero pivots during the in-place LU factorization (no division by
 * zero).  A is a zero-initialized global, so += accumulates from 0. */
void init_arrays()
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            L[i][j] = 0.0;
            U[i][j] = 0.0;
        }
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j <= i; j++) {
            L[i][j] = i + j + 1;
            U[j][i] = i + j + 1;
        }
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            for (int k = 0; k < N; k++) {
                A[i][j] += L[i][k] * U[k][j];
            }
        }
    }
}
/* Wall-clock time in seconds with microsecond resolution.
 * Fixes over the previous version: the gettimeofday() return value is
 * now checked (it was stored in an unused `stat` variable), and the
 * obsolete timezone argument is passed as NULL as POSIX recommends
 * instead of an uninitialized struct. */
double rtclock()
{
    struct timeval tp;
    if (gettimeofday(&tp, NULL) != 0)
        fprintf(stderr, "Error: gettimeofday failed\n");
    return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
int main()
{
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
#include <math.h>
#include <assert.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define S1(zT0,zT1,k,j) {A[k][j]=A[k][j]/A[k][k];}
#define S2(zT0,zT1,zT2,k,i,j) {A[i][j]=A[i][j]-A[i][k]*A[k][j];}
int c1, c2, c3, c4, c5, c6;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.02s. */
for (c1=-1;c1<=floord(2*N-3,32);c1++) {
lb1=max(max(ceild(16*c1-15,32),ceild(32*c1-N+2,32)),0);
ub1=min(floord(32*c1+31,32),floord(N-1,32));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(ceild(16*c1-16*c2-465,496),ceild(16*c1-16*c2-15,16));c3<=floord(N-1,32);c3++) {
if (c1 == c2+c3) {
for (c4=max(0,32*c3);c4<=min(min(32*c3+30,32*c2+30),N-2);c4++) {
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S1(c1-c2,c2,c4,c6) ;
}
}
for (c5=c4+1;c5<=min(32*c3+31,N-1);c5++) {
{
lbv=max(c4+1,32*c2);
ubv=min(32*c2+31,N-1);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1-c2,c1-c2,c2,c4,c5,c6) ;
}
}
}
}
}
/*@ begin Loop(
transform UnrollJam(ufactor=8)
for (c4=max(0,32*c1-32*c2);c4<=min(min(32*c1-32*c2+31,32*c3-1),32*c2+30);c4++)
transform UnrollJam(ufactor=8)
for (c5=32*c3;c5<=min(N-1,32*c3+31);c5++)
{
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1-c2,c3,c2,c4,c5,c6) ;
}
}
}
) @*/{
for (c4 = max(0, 32 * c1 - 32 * c2); c4 <= min(min(32 * c1 - 32 * c2 + 31, 32 * c3 - 1), 32 * c2 + 30) - 7; c4 = c4 + 8) {
for (c5 = 32 * c3; c5 <= min(N - 1, 32 * c3 + 31) - 7; c5 = c5 + 8) {
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, c4, c5, c6);
S2(c1 - c2, c3, c2, c4, (c5 + 1), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 2), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 3), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 4), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 5), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 6), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 7), c6);
}
}
{
lbv=max((c4+1)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 1), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 1), (c5 + 7), c6);
}
}
{
lbv=max((c4+2)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 2), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 2), (c5 + 7), c6);
}
}
{
lbv=max((c4+3)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 3), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 3), (c5 + 7), c6);
}
}
{
lbv=max((c4+4)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 4), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 4), (c5 + 7), c6);
}
}
{
lbv=max((c4+5)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 5), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 5), (c5 + 7), c6);
}
}
{
lbv=max((c4+6)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 6), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 6), (c5 + 7), c6);
}
}
{
lbv=max((c4+7)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 7), c5, c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 1), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 2), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 3), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 4), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 5), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 6), c6);
S2(c1 - c2, c3, c2, (c4 + 7), (c5 + 7), c6);
}
}
}
for (; c5 <= min(N - 1, 32 * c3 + 31); c5 = c5 + 1) {
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, c4, c5, c6);
}
}
{
lbv=max((c4+1)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 1), c5, c6);
}
}
{
lbv=max((c4+2)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 2), c5, c6);
}
}
{
lbv=max((c4+3)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 3), c5, c6);
}
}
{
lbv=max((c4+4)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 4), c5, c6);
}
}
{
lbv=max((c4+5)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 5), c5, c6);
}
}
{
lbv=max((c4+6)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 6), c5, c6);
}
}
{
lbv=max((c4+7)+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, (c4 + 7), c5, c6);
}
}
}
}
for (; c4 <= min(min(32 * c1 - 32 * c2 + 31, 32 * c3 - 1), 32 * c2 + 30); c4 = c4 + 1) {
for (c5 = 32 * c3; c5 <= min(N - 1, 32 * c3 + 31) - 7; c5 = c5 + 8)
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, c4, c5, c6);
S2(c1 - c2, c3, c2, c4, (c5 + 1), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 2), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 3), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 4), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 5), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 6), c6);
S2(c1 - c2, c3, c2, c4, (c5 + 7), c6);
}
}
for (; c5 <= min(N - 1, 32 * c3 + 31); c5 = c5 + 1)
{
lbv=max(c4+1,32*c2);
ubv=min(N-1,32*c2+31);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S2(c1 - c2, c3, c2, c4, c5, c6);
}
}
}
}
/*@ end @*/
if ((-c1 == -c2-c3) && (c1 <= min(floord(64*c2-1,32),floord(32*c2+N-33,32)))) {
{
lbv=max(32*c1-32*c2+32,32*c2);
ubv=min(32*c2+31,N-1);
#pragma ivdep
#pragma vector always
for (c6=lbv; c6<=ubv; c6++) {
S1(c1-c2,c2,32*c1-32*c2+31,c6) ;
}
}
}
}
}
}
/* End of CLooG code */
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
#ifndef TEST
printf("%f\n", annot_t_total);
#else
{
int i, j;
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
if (j%100==0)
printf("\n");
printf("%f ",A[i][j]);
}
printf("\n");
}
}
#endif
return ((int) A[0][0]);
}
|
ast-dump-openmp-single.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp single
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-single.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPSingleDirective {{.*}} <line:4:9, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-single.c:4:9) *const restrict'
|
NeuralNetwork_OMP_CPU2.c | /* NEURAL NETWORK OMP CPU2.c
* by Lut99
*
* Created:
* 4/18/2020, 11:25:46 PM
* Last edited:
* 19/11/2020, 17:18:00
* Auto updated?
* Yes
*
* Description:
* The NeuralNetwork class implements a matrix-based Feedforward Neural
* Network which is hardcoded to use Mean Squared Error for cost function and
* sigmoid as activation function.
*
* This file implements the second of eight different OpenMP-optimised
* versions for the CPU. It optimises both the forward and the backward pass
* using threads, where any race conditions in the backward pass are fixed
* using critical regions.
**/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include "NeuralNetwork.h"
#define WEIGHTS_MIN -3.0
#define WEIGHTS_MAX 3.0
#define BIAS_MIN -3.0
#define BIAS_MAX 3.0
#define NUM_THREADS 16
/***** OPTIONAL PARAMETERS *****/
static unsigned int n_threads = 16;
/***** OPENMP DECLARATIONS *****/
extern int omp_set_num_threads();
extern int omp_get_num_procs();
extern int omp_get_thread_num();
/***** HELPER FUNCTIONS *****/
#define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0)
extern size_t max(size_t length, const size_t* list);
/***** NEURAL NETWORK OPERATIONS *****/
void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) {
#ifdef BENCHMARK
// Declare all timers
struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd;
// Set some shortcuts for the timers
size_t half_iters = n_iterations / 2;
size_t half_samples = n_samples / 2;
// Start the total timer
gettimeofday(&s_total, NULL);
#endif
// Also obtain links to all biases / matrices
double** biases = nn->biases;
double** weights = nn->weights;
// Make some shortcuts for the number-of-nodes information
size_t n_layers = nn->n_layers;
size_t* nodes_per_layer = nn->nodes_per_layer;
// Initialize the temporary delta memory (and previous list) to the correct size, one for each thread
size_t deltas_size = max(n_layers, nodes_per_layer);
double* deltas = malloc(sizeof(double) * n_threads * deltas_size);
double* prev_deltas = malloc(sizeof(double) * n_threads * deltas_size);
// Create a list that is used to store intermediate outputs. The first input layer (=first column)
// is linked and not copied to the input data
double* layer_outputs[n_threads][n_layers];
for (unsigned int t = 0; t < n_threads; t++) {
// Allocate arrays for the other layers except
for (size_t l = 1; l < n_layers; l++) {
layer_outputs[t][l] = malloc(sizeof(double) * nodes_per_layer[l]);
}
}
// Create the delta_biases and delta_weights arrays / matrices
double* delta_biases[nn->n_weights];
double* delta_weights[nn->n_weights];
for(size_t l = 0; l < nn->n_weights; l++) {
delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);
// Fill with zeros
for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
delta_biases[l][n] = 0;
for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
}
}
}
#ifdef BENCHMARK
// Start the iterations timer
gettimeofday(&s_iters, NULL);
#endif
// Perform the training for n_iterations (always)
size_t last_nodes = nodes_per_layer[n_layers - 1];
size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
double* last_delta_bias = delta_biases[n_layers - 2];
double* last_delta_weight = delta_weights[n_layers - 2];
for (size_t i = 0; i < n_iterations; i++) {
#pragma omp parallel
{
int TID = omp_get_thread_num();
double* t_deltas = deltas + TID * deltas_size;
double* t_prev_deltas = prev_deltas + TID * deltas_size;
double** t_layer_outputs = layer_outputs[TID];
#pragma omp for schedule(static)
for (size_t s = 0; s < n_samples; s++) {
/***** FORWARD PASS *****/
#ifdef BENCHMARK
// Start the forward pass timer
if (i == half_iters && s == half_samples) {
gettimeofday(&s_fwd, NULL);
}
#endif
// Set the inputs as the first layer
t_layer_outputs[0] = inputs[s];
// Iterate over each layer to feedforward through the network
for (size_t l = 1; l < n_layers; l++) {
// Get some references to the bias list, weight matrix and outputs of the previous and this layer
double* bias = biases[l - 1];
double* weight = weights[l - 1];
double* prev_output = t_layer_outputs[l - 1];
double* output = t_layer_outputs[l];
// Compute the activation for each node on this layer
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
for (size_t n = 0; n < this_nodes; n++) {
// Sum the weighted inputs for this node
double z = bias[n];
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
}
// Run the activation function over this input and store it in the output
output[n] = 1 / (1 + exp(-z));
}
}
#ifdef BENCHMARK
// End the forward timer, start the backward pass output timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_fwd, NULL);
gettimeofday(&s_bck_out, NULL);
}
#endif
/***** BACKWARD PASS *****/
// Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547
// Backpropagate the error from the last layer to the first.
double* sample_expected = expected[s];
// Do the output layer: compute the deltas
double* output = t_layer_outputs[n_layers - 1];
for (size_t n = 0; n < last_nodes; n++) {
double output_val = output[n];
t_prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
}
// // Do the output layer: compute the bias & weight updates
#pragma omp critical
{
// Add all deltas as delta_biases for this layer
for (size_t n = 0; n < last_nodes; n++) {
last_delta_bias[n] += t_prev_deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
double* last_prev_output = t_layer_outputs[n_layers - 2];
for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
for (size_t n = 0; n < last_nodes; n++) {
last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * t_prev_deltas[n];
}
}
}
#ifdef BENCHMARK
// End the backward pass output timer, start the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_out, NULL);
gettimeofday(&s_bck_hid, NULL);
}
#endif
// Then, the rest of the hidden layers
for (size_t l = n_layers - 2; l > 0; l--) {
double* delta_bias = delta_biases[l - 1];
double* delta_weight = delta_weights[l - 1];
double* output = t_layer_outputs[l];
double* prev_output = t_layer_outputs[l - 1];
size_t next_nodes = nodes_per_layer[l + 1];
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
// Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
double* weight_next = weights[l];
for (size_t n = 0; n < this_nodes; n++) {
// Take the weighted sum of all connection of that node with this layer
double error = 0;
for (size_t next_n = 0; next_n < next_nodes; next_n++) {
error += t_prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
}
// Multiply the error with the derivative of the activation function to find the result
double output_val = output[n];
t_deltas[n] = error * output_val * (1 - output_val);
}
// Add all deltas as delta_biases for this layer
#pragma omp critical
{
for (size_t n = 0; n < this_nodes; n++) {
delta_bias[n] += t_deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
for (size_t n = 0; n < this_nodes; n++) {
delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * t_deltas[n];
}
}
}
// Swap the two delta lists
double* temp = t_deltas;
t_deltas = t_prev_deltas;
t_prev_deltas = temp;
}
#ifdef BENCHMARK
// End the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_hid, NULL);
}
#endif
}
#ifdef BENCHMARK
// Start the updates timer
if (i == half_iters) {
gettimeofday(&s_upd, NULL);
}
#endif
// Actually update the weights, and reset the delta updates to 0 for next iteration
#pragma omp for schedule(static)
for (size_t l = 0; l < nn->n_weights; l++) {
double* bias = biases[l];
double* delta_bias = delta_biases[l];
double* weight = weights[l];
double* delta_weight = delta_weights[l];
// Update the biases & reset delta_biases
size_t this_nodes = nodes_per_layer[l + 1];
for (size_t n = 0; n < this_nodes; n++) {
bias[n] += delta_bias[n] * learning_rate;
delta_bias[n] = 0;
}
// Update the weights & reset delta_weights
size_t prev_nodes = nodes_per_layer[l];
for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
weight[i] += delta_weight[i] * learning_rate;
delta_weight[i] = 0;
}
}
#ifdef BENCHMARK
// Stop the updates timer
if (i == half_iters) {
gettimeofday(&e_upd, NULL);
}
#endif
}
}
#ifdef BENCHMARK
// End the iterations timer
gettimeofday(&e_iters, NULL);
#endif
// Cleanup
// Free the delta biases / weights
for(size_t l = 0; l < n_layers - 1; l++) {
free(delta_biases[l]);
free(delta_weights[l]);
}
// Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
for (unsigned int t = 0; t < n_threads; t++) {
for (size_t l = 1; l < n_layers; l++) {
free(layer_outputs[t][l]);
}
}
// Cleanup the deltas
free(deltas);
free(prev_deltas);
#ifdef BENCHMARK
// End the total timer
gettimeofday(&e_total, NULL);
// Print the results
printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
#endif
}
/***** OTHER TOOLS *****/
/* Parse the optional, variation-specific CLI arguments.
 * Argument 0 (if present) is the requested number of OpenMP threads.
 * Fix: use strtol with validation instead of atoi -- atoi silently
 * returned 0 for malformed input, and omp_set_num_threads(0) is an
 * invalid thread count.  On missing/invalid/non-positive input the
 * compiled-in default is kept. */
void parse_opt_args(int argc, char** argv) {
    if (argc >= 1) {
        char* end = NULL;
        long requested = strtol(argv[0], &end, 10);
        if (end != argv[0] && *end == '\0' && requested >= 1) {
            n_threads = (unsigned int) requested;
        } else {
            fprintf(stderr, "Warning: invalid thread count '%s', using %u\n", argv[0], n_threads);
        }
    }
    omp_set_num_threads(n_threads);
}
/* Report which benchmark variation is running and the configured
 * OpenMP thread count (set via parse_opt_args or the default). */
void print_opt_args() {
    printf(" - Variation : OpenMP CPU 2 (Forward & Backward, critical)\n");
    printf(" - Number of threads : %u\n", n_threads);
}
|
Hadamard.c | #include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <omp.h>
/* Element-wise (Hadamard) product benchmark: c = a .* b for two n x n
 * int32 matrices, parallelised with OpenMP; prints thread count, size
 * and elapsed time.
 *
 * Fixes over the previous version:
 *  - input validation now rejects trailing garbage (e.g. "12abc"); the
 *    old check `*err != '\0' && n == 0` only failed when BOTH held;
 *  - allocation failures are detected instead of dereferencing NULL;
 *  - a and b are initialised before use (reading uninitialised malloc
 *    memory is undefined behavior);
 *  - loop bounds use size_t, avoiding signed/unsigned comparison;
 *  - redundant NULL guards before free() removed (free(NULL) is a no-op). */
int main(int argc, char** argv) {
    // Check if argument is valid
    if (argc < 2) {
        printf("Provide size of the Matrix! Usage: ./Hadamard n\n");
        return EXIT_FAILURE;
    }
    // Parse the matrix dimension
    char* err;
    int n = strtol(argv[1], &err, 10);
    if (err == argv[1] || *err != '\0') {
        printf("Invalid input! Usage: ./Hadamard n\n");
        return EXIT_FAILURE;
    }
    if (n <= 0) {
        printf("Invalid input! Size must be larger than Zero\n");
        return EXIT_FAILURE;
    }
    size_t dim = (size_t) n;
    // Allocate memory
    int32_t (*a)[dim] = malloc(sizeof(int32_t[dim][dim]));
    int32_t (*b)[dim] = malloc(sizeof(int32_t[dim][dim]));
    int32_t (*c)[dim] = malloc(sizeof(int32_t[dim][dim]));
    if (a == NULL || b == NULL || c == NULL) {
        printf("Out of memory!\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    // Initialise the operands (previously read uninitialised memory)
    for (size_t i = 0; i < dim; ++i) {
        for (size_t j = 0; j < dim; ++j) {
            a[i][j] = (int32_t)(i + j);
            b[i][j] = (int32_t)(i * j + 1);
        }
    }
    // Hadamard algorithm
    int threads;
    double startTime = omp_get_wtime();
#pragma omp parallel shared(a,b,c, threads)
    {
#pragma omp for
#ifdef COL_MAJOR
        for (size_t j = 0; j < dim; ++j) {
            for (size_t i = 0; i < dim; ++i) {
#else
        for (size_t i = 0; i < dim; ++i) {
            for (size_t j = 0; j < dim; ++j) {
#endif // COL_MAJOR
                c[i][j] = a[i][j] * b[i][j];
            }
        }
        threads = omp_get_num_threads();
    }
    double endTime = omp_get_wtime();
    printf("p = %d, n = %d, t = %2.2f\n", threads, n ,endTime - startTime);
    // Free memory
    free(a);
    free(b);
    free(c);
    return EXIT_SUCCESS;
}
hdri2ldrcube.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
#include <float.h>
#include <omp.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define TJE_IMPLEMENTATION
#include "tiny_jpeg.h"
int img_width, img_height;
float* img_data;
/* Load an HDR image as 3-channel float data into the img_* globals;
 * exits the process with an error message on failure. */
void load_image(const char* filename)
{
    int channels;
    img_data = stbi_loadf(filename, &img_width, &img_height, &channels, 3);
    if (img_data == NULL)
    {
        fprintf(stderr, "failed to load image: %s\n", filename);
        exit(1);
    }
}
/* Print the minimum and maximum of the loaded HDR data, both raw and
 * after gamma (1/2.2) encoding -- a quick sanity check on the input. */
void check_ranges()
{
    float minval = FLT_MAX, gminval = FLT_MAX;
    float maxval = 0.0f, gmaxval = 0.0f;
    const int count = img_width * img_height * 3;
    for (int i = 0; i < count; ++i)
    {
        const float v = img_data[i];
        const float g = powf(v, 1.0f / 2.2f);
        if (v < minval) minval = v;
        if (v > maxval) maxval = v;
        if (g < gminval) gminval = g;
        if (g > gmaxval) gmaxval = g;
    }
    printf("data range (regular): [%f; %f]\n", minval, maxval);
    printf("data range (gamma): [%f; %f]\n", gminval, gmaxval);
}
uint8_t* out_data;
int out_width, out_height, out_comps;
/* (Re)allocate the global output image buffer to w x h pixels with c
 * components each, updating the out_* dimension globals.
 * Fixes: the malloc result is now checked (every caller writes through
 * out_data unconditionally), and the redundant NULL guard before free()
 * was dropped (free(NULL) is a no-op).  Exits on allocation failure,
 * matching load_image's error-handling style. */
void alloc_image(int w, int h, int c)
{
    free(out_data);
    out_width = w;
    out_height = h;
    out_comps = c;
    out_data = (uint8_t*) malloc((size_t) c * w * h);
    if (!out_data)
    {
        fprintf(stderr, "failed to allocate %dx%dx%d image\n", w, h, c);
        exit(1);
    }
}
/* Gamma-encode (1/2.2) the loaded HDR buffer into an 8-bit RGB image of
 * the same dimensions, clamping to 255. */
void generate_dump()
{
    alloc_image(img_width, img_height, 3);
    const int total = img_width * img_height * 3;
    for (int i = 0; i < total; ++i)
    {
        int v = powf(img_data[i], 1.0f / 2.2f) * 255;
        out_data[i] = v > 255 ? 255 : v;
    }
}
/* Downsample the output image by 2x with a 2x2 box filter (average of
 * four source pixels per destination pixel).  Replaces out_data with a
 * freshly allocated half-size buffer and frees the old one.
 * Note: odd trailing rows/columns of the source are simply dropped by
 * the integer division of the dimensions. */
void ds2x()
{
    uint8_t* src_data = out_data;
    /* Detach the old buffer so alloc_image does not free it. */
    out_data = NULL;
    alloc_image(out_width / 2, out_height / 2, out_comps);
    #pragma omp parallel for
    for (int y = 0; y < out_height; ++y)
    {
        for (int x = 0; x < out_width; ++x)
        {
            for (int c = 0; c < out_comps; ++c)
            {
                /* Average the 2x2 block at (2x, 2y) of the source; the
                 * source row stride is out_width * 2 (pre-halving width). */
                out_data[c + out_comps * (x + y * out_width)] = (
                    src_data[c + out_comps * (x * 2 + y * 2 * out_width * 2)] +
                    src_data[c + out_comps * (x * 2 + 1 + y * 2 * out_width * 2)] +
                    src_data[c + out_comps * (x * 2 + (y * 2 + 1) * out_width * 2)] +
                    src_data[c + out_comps * (x * 2 + 1 + (y * 2 + 1) * out_width * 2)]) / 4;
            }
        }
    }
    free(src_data);
}
/* Encode the current output buffer to a JPEG file at the given quality;
 * exits the process with an error message on failure. */
void save_image_jpeg(const char* filename, int qual)
{
    int ok = tje_encode_to_file_at_quality(filename, qual, out_width, out_height, out_comps, out_data);
    if (ok == 0)
    {
        fprintf(stderr, "failed to save output image: %s\n", filename);
        exit(1);
    }
}
#define PI 3.14159f
static const float INVPI = 1.0f / PI;
static const float INVPI2 = 0.5f / PI;
/* Bilinearly sample the loaded HDR image at pixel coordinates (u, v),
 * writing the interpolated RGB triple to out.  Neighbours are clamped
 * to the image border.
 * NOTE(review): assumes u, v >= 0 -- for negative inputs both floor()
 * and fmodf() would yield inconsistent weights; callers appear to pass
 * only non-negative coordinates (confirm at call sites). */
void sample_hdr_uv(float out[3], float u, float v)
{
    /* Integer coordinates of the 2x2 neighbourhood, clamped at the edge. */
    int x0 = floor(u);
    int x1 = x0 + 1 < img_width - 1 ? x0 + 1 : img_width - 1;
    int y0 = floor(v);
    int y1 = y0 + 1 < img_height - 1 ? y0 + 1 : img_height - 1;
    /* Flat offsets of the four RGB texels. */
    int off00 = (x0 + y0 * img_width) * 3;
    int off10 = (x1 + y0 * img_width) * 3;
    int off01 = (x0 + y1 * img_width) * 3;
    int off11 = (x1 + y1 * img_width) * 3;
    /* Fractional position inside the cell and its complements. */
    float fx = fmodf(u, 1.0f);
    float fy = fmodf(v, 1.0f);
    float ifx = 1.0f - fx;
    float ify = 1.0f - fy;
    /* Bilinear blend, one channel at a time. */
    out[0] = img_data[off00 + 0] * ifx * ify + img_data[off10 + 0] * fx * ify + img_data[off01 + 0] * ifx * fy + img_data[off11 + 0] * fx * fy;
    out[1] = img_data[off00 + 1] * ifx * ify + img_data[off10 + 1] * fx * ify + img_data[off01 + 1] * ifx * fy + img_data[off11 + 1] * fx * fy;
    out[2] = img_data[off00 + 2] * ifx * ify + img_data[off10 + 2] * fx * ify + img_data[off01 + 2] * ifx * fy + img_data[off11 + 2] * fx * fy;
}
// Sample the equirectangular HDR map in direction dir (expected unit length)
// and write the RGB result to out[3].
// Fix: clamp dir[2] into [-1, 1] before acos -- vec3_normalize leaves the
// components unit-length only up to rounding, and acosf of a value slightly
// outside its domain returns NaN, corrupting the texel coordinates.
// Also use the float-precision atan2f/acosf to match the float pipeline.
void sample_hdr_vec(float out[3], float dir[3])
{
    float z = dir[2];
    if (z > 1.0f) z = 1.0f;
    if (z < -1.0f) z = -1.0f;
    float u = (atan2f(dir[1], dir[0]) + PI) * INVPI2;
    float v = acosf(z) * INVPI;
    sample_hdr_uv(out, u * img_width, v * img_height);
}
void vec3_normalize(float vec[3])
{
float lensq = vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2];
if (lensq)
{
float inv = 1.0f / sqrtf(lensq);
vec[0] *= inv;
vec[1] *= inv;
vec[2] *= inv;
}
}
// Render one cube-map face into the output atlas.
//   dx, dy      : top-left pixel of this face's tile inside out_data
//   w, h        : face size in pixels
//   dir         : face normal (view direction at the face center)
//   up, rt      : in-face axes; fx/fy in [-1, 1] walk along them
// Each pixel direction is normalized, sampled from the HDR map, tonemapped
// and gamma-corrected, then written as 8-bit RGB.
void generate_cube_face(int dx, int dy, int w, int h, float dir[3], float up[3], float rt[3])
{
#pragma omp parallel for
for (int y = 0; y < h; ++y)
{
// Map pixel row 0..h-1 to fy in [1, -1] (top of face = +up).
float fy = ((float)y) / ((float)(h-1)) * -2.0f + 1.0f;
for (int x = 0; x < w; ++x)
{
float val[3];
// Map pixel column 0..w-1 to fx in [1, -1].
float fx = ((float)x) / ((float)(w-1)) * -2.0f + 1.0f;
// Direction through this pixel: face normal plus in-face offsets.
float vec[3] =
{
dir[0] + rt[0] * fx + up[0] * fy,
dir[1] + rt[1] * fx + up[1] * fy,
dir[2] + rt[2] * fx + up[2] * fy,
};
vec3_normalize(vec);
sample_hdr_vec(val, vec);
// tonemap
#define ACES_TONEMAP
#ifdef ACES_TONEMAP
// ACES filmic approximation (Narkowicz):
// https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
// 0.6 is a fixed exposure factor applied before the curve.
val[0] *= 0.6f;
val[1] *= 0.6f;
val[2] *= 0.6f;
static const float a = 2.51f;
static const float b = 0.03f;
static const float c = 2.43f;
static const float d = 0.59f;
static const float e = 0.14f;
val[0] = (val[0]*(a*val[0]+b))/(val[0]*(c*val[0]+d)+e);
val[1] = (val[1]*(a*val[1]+b))/(val[1]*(c*val[1]+d)+e);
val[2] = (val[2]*(a*val[2]+b))/(val[2]*(c*val[2]+d)+e);
// gamma
val[0] = powf(val[0], 1.0f / 2.2f);
val[1] = powf(val[1], 1.0f / 2.2f);
val[2] = powf(val[2], 1.0f / 2.2f);
#else
// Fallback: simple Reinhard tonemap.
val[0] = val[0] / (1 + val[0]);
val[1] = val[1] / (1 + val[1]);
val[2] = val[2] / (1 + val[2]);
// gamma
val[0] = powf(val[0], 1.0f / 2.2f);
val[1] = powf(val[1], 1.0f / 2.2f);
val[2] = powf(val[2], 1.0f / 2.2f);
#endif
// clamp
if (val[0] > 1) val[0] = 1;
if (val[1] > 1) val[1] = 1;
if (val[2] > 1) val[2] = 1;
// write the 8-bit RGB pixel into the atlas at the face's tile offset
uint8_t* pixel = &out_data[3 * (x + dx + (y + dy) * out_width)];
pixel[0] = val[0] * 255.0f;
pixel[1] = val[1] * 255.0f;
pixel[2] = val[2] * 255.0f;
}
}
}
// One face of the output cube map: the viewing basis (dir = face normal,
// up/rt = in-face axes used by generate_cube_face) plus the face's (x, y)
// tile position in the 2-wide, 3-tall output atlas.
typedef struct CubeFace
{
float dir[3];
float up[3];
float rt[3];
int x, y;
}
CubeFace;
// The six faces: +X, -X, +Y, -Y, +Z, -Z. The up/rt choices fix each face's
// orientation; presumably matched to the consuming renderer's cube-map
// convention -- verify against the loader before changing any of them.
CubeFace cubeFaces[6] =
{
{ { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }, 0, 0, },
{ { -1, 0, 0 }, { 0, 1, 0 }, { 0, 0, -1 }, 1, 0, },
{ { 0, 1, 0 }, { 0, 0, -1 }, { -1, 0, 0 }, 0, 1, },
{ { 0, -1, 0 }, { 0, 0, 1 }, { -1, 0, 0 }, 1, 1, },
{ { 0, 0, 1 }, { 0, 1, 0 }, { -1, 0, 0 }, 0, 2, },
{ { 0, 0, -1 }, { 0, 1, 0 }, { 1, 0, 0 }, 1, 2, },
};
#define SIDE_WIDTH 512
#define SIDE_WIDTH_PRE 128
// Entry point: load an equirectangular HDR map, render it into a 2x3
// cube-map atlas (layout defined by cubeFaces), save the full-size JPEG,
// then 2x-downsample until each face is SIDE_WIDTH_PRE wide and save a
// small "preload" JPEG.
int main()
{
load_image("input.hdr");
// check_ranges();
// generate_dump();
// save_image_jpeg("output.jpg");  NOTE(review): stale call -- save_image_jpeg now also takes a quality argument
alloc_image(SIDE_WIDTH*2, SIDE_WIDTH*3, 3);
for (int i = 0; i < 6; ++i)
{
generate_cube_face(SIDE_WIDTH * cubeFaces[i].x, SIDE_WIDTH * cubeFaces[i].y, SIDE_WIDTH, SIDE_WIDTH, cubeFaces[i].dir, cubeFaces[i].up, cubeFaces[i].rt);
}
// quality 2 = tiny_jpeg's middle quality setting
save_image_jpeg("cubemap.jpg", 2);
// Halve the atlas until each face is SIDE_WIDTH_PRE pixels wide.
while (out_width > SIDE_WIDTH_PRE)
ds2x();
save_image_jpeg("cubemap.preload.jpg", 2);
stbi_image_free(img_data);
free(out_data);
return 0;
}
|
mapper_vertex_morphing_matrix_free.h | // ==============================================================================
// KratosShapeOptimizationApplication
//
// License: BSD License
// license: ShapeOptimizationApplication/license.txt
//
// Main authors: Baumgaertner Daniel, https://github.com/dbaumgaertner
//
// ==============================================================================
#ifndef MAPPER_VERTEX_MORPHING_MATRIX_FREE_H
#define MAPPER_VERTEX_MORPHING_MATRIX_FREE_H
// ------------------------------------------------------------------------------
// System includes
// ------------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <algorithm>
// ------------------------------------------------------------------------------
// Project includes
// ------------------------------------------------------------------------------
#include "includes/define.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/builtin_timer.h"
#include "spaces/ublas_space.h"
#include "shape_optimization_application.h"
#include "mapper_base.h"
#include "filter_function.h"
// ==============================================================================
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
/// Matrix-free vertex-morphing mapper: the filter operator is applied on the
/// fly in every Map()/InverseMap() call (neighbor search + weighting per
/// node) instead of assembling an explicit mapping matrix.
class MapperVertexMorphingMatrixFree : public Mapper
{
public:
///@name Type Definitions
///@{
// Type definitions for better reading later
typedef Node < 3 > NodeType;
typedef Node < 3 > ::Pointer NodeTypePointer;
typedef std::vector<NodeType::Pointer> NodeVector;
typedef std::vector<NodeType::Pointer>::iterator NodeIterator;
typedef std::vector<double>::iterator DoubleVectorIterator;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
// Type definitions for tree-search
typedef Bucket< 3, NodeType, NodeVector, NodeTypePointer, NodeIterator, DoubleVectorIterator > BucketType;
typedef Tree< KDTreePartition<BucketType> > KDTree;
/// Pointer definition of MapperVertexMorphingMatrixFree
KRATOS_CLASS_POINTER_DEFINITION(MapperVertexMorphingMatrixFree);
///@}
///@name Life Cycle
///@{
/// Default constructor.
/// Reads "filter_radius" and "max_nodes_in_filter_radius" from the settings;
/// no heavy work happens here -- Initialize() builds the search structures.
MapperVertexMorphingMatrixFree( ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, Parameters MapperSettings )
: mrOriginModelPart( rOriginModelPart ),
mrDestinationModelPart( rDestinationModelPart ),
mMapperSettings( MapperSettings ),
mFilterRadius( MapperSettings["filter_radius"].GetDouble() ),
mMaxNumberOfNeighbors( MapperSettings["max_nodes_in_filter_radius"].GetInt())
{
}
/// Destructor.
virtual ~MapperVertexMorphingMatrixFree()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
// --------------------------------------------------------------------------
// Builds all mapping data structures (node list, filter function, value
// vectors, mapping ids, KD-tree). Called lazily by Map()/InverseMap() on
// first use.
void Initialize() override
{
BuiltinTimer timer;
KRATOS_INFO("ShapeOpt") << "Starting initialization of matrix-free mapper..." << std::endl;
CreateListOfNodesInOriginModelPart();
CreateFilterFunction();
InitializeMappingVariables();
AssignMappingIds();
CreateSearchTreeWithAllNodesInOriginModelPart();
mIsMappingInitialized = true;
KRATOS_INFO("ShapeOpt") << "Finished initialization of matrix-free mapper in " << timer.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
// Forward map (origin -> destination) of a vector variable: each destination
// node gathers the normalized-weight average of rOriginVariable over its
// origin-side neighbors within the filter radius.
void Map( const Variable<array_3d> &rOriginVariable, const Variable<array_3d> &rDestinationVariable ) override
{
if (mIsMappingInitialized == false)
Initialize();
BuiltinTimer mapping_time;
KRATOS_INFO("") << std::endl;
KRATOS_INFO("ShapeOpt") << "Starting mapping of " << rOriginVariable.Name() << "..." << std::endl;
// Prepare vectors for mapping
mValuesDestination[0].clear();
mValuesDestination[1].clear();
mValuesDestination[2].clear();
// Perform mapping
const auto destination_nodes_begin = mrDestinationModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
NodeVector neighbor_nodes(mMaxNumberOfNeighbors);
std::vector<double> resulting_squared_distances(mMaxNumberOfNeighbors);
unsigned int number_of_neighbors = mpSearchTree->SearchInRadius( node_i,
mFilterRadius,
neighbor_nodes.begin(),
resulting_squared_distances.begin(),
mMaxNumberOfNeighbors );
ThrowWarningIfNumberOfNeighborsExceedsLimit(node_i, number_of_neighbors);
std::vector<double> list_of_weights( number_of_neighbors, 0.0 );
double sum_of_weights = 0.0;
ComputeWeightForAllNeighbors( node_i, neighbor_nodes, number_of_neighbors, list_of_weights, sum_of_weights );
int node_i_mapping_id = node_i.GetValue(MAPPING_ID);
for(unsigned int neighbor_itr = 0 ; neighbor_itr<number_of_neighbors ; neighbor_itr++)
{
// Normalize weights so they sum to 1 over this node's neighborhood.
double weight = list_of_weights[neighbor_itr] / sum_of_weights;
ModelPart::NodeType& node_j = *neighbor_nodes[neighbor_itr];
array_3d& nodal_variable = node_j.FastGetSolutionStepValue(rOriginVariable);
// NOTE(review): node_i_mapping_id is unique per loop iteration here,
// so these atomics look redundant for the forward map (unlike
// InverseMap, where they are required) -- confirm before removing.
#pragma omp atomic
mValuesDestination[0][node_i_mapping_id] += weight*nodal_variable[0];
#pragma omp atomic
mValuesDestination[1][node_i_mapping_id] += weight*nodal_variable[1];
#pragma omp atomic
mValuesDestination[2][node_i_mapping_id] += weight*nodal_variable[2];
}
}
// Assign results to nodal variable
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
int i = node_i.GetValue(MAPPING_ID);
array_3d& r_node_vector = node_i.FastGetSolutionStepValue(rDestinationVariable);
r_node_vector(0) = mValuesDestination[0][i];
r_node_vector(1) = mValuesDestination[1][i];
r_node_vector(2) = mValuesDestination[2][i];
}
KRATOS_INFO("ShapeOpt") << "Finished mapping in " << mapping_time.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
// Forward map (origin -> destination) of a scalar variable; same scheme as
// the vector overload but only component [0] of the work vectors is used.
void Map( const Variable<double> &rOriginVariable, const Variable<double> &rDestinationVariable ) override
{
if (mIsMappingInitialized == false)
Initialize();
BuiltinTimer mapping_time;
KRATOS_INFO("") << std::endl;
KRATOS_INFO("ShapeOpt") << "Starting mapping of " << rOriginVariable.Name() << "..." << std::endl;
// Prepare vectors for mapping
mValuesDestination[0].clear();
// Perform mapping
const auto destination_nodes_begin = mrDestinationModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
NodeVector neighbor_nodes(mMaxNumberOfNeighbors);
std::vector<double> resulting_squared_distances(mMaxNumberOfNeighbors);
unsigned int number_of_neighbors = mpSearchTree->SearchInRadius( node_i,
mFilterRadius,
neighbor_nodes.begin(),
resulting_squared_distances.begin(),
mMaxNumberOfNeighbors );
ThrowWarningIfNumberOfNeighborsExceedsLimit(node_i, number_of_neighbors);
std::vector<double> list_of_weights( number_of_neighbors, 0.0 );
double sum_of_weights = 0.0;
ComputeWeightForAllNeighbors( node_i, neighbor_nodes, number_of_neighbors, list_of_weights, sum_of_weights );
int node_i_mapping_id = node_i.GetValue(MAPPING_ID);
for(unsigned int neighbor_itr = 0 ; neighbor_itr<number_of_neighbors ; neighbor_itr++)
{
double weight = list_of_weights[neighbor_itr] / sum_of_weights;
ModelPart::NodeType& node_j = *neighbor_nodes[neighbor_itr];
#pragma omp atomic
mValuesDestination[0][node_i_mapping_id] += weight*node_j.FastGetSolutionStepValue(rOriginVariable);
}
}
// Assign results to nodal variable
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
int i = node_i.GetValue(MAPPING_ID);
node_i.FastGetSolutionStepValue(rDestinationVariable) = mValuesDestination[0][i];
}
KRATOS_INFO("ShapeOpt") << "Finished mapping in " << mapping_time.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
// Inverse map (destination -> origin) of a vector variable: each destination
// node SCATTERS its weighted value onto its origin-side neighbors. Several
// destination nodes can hit the same origin node concurrently, hence the
// atomic accumulation below is required.
void InverseMap( const Variable<array_3d> &rDestinationVariable, const Variable<array_3d> &rOriginVariable ) override
{
if (mIsMappingInitialized == false)
Initialize();
BuiltinTimer mapping_time;
KRATOS_INFO("") << std::endl;
KRATOS_INFO("ShapeOpt") << "Starting inverse mapping of " << rDestinationVariable.Name() << "..." << std::endl;
// Prepare vectors for mapping
mValuesOrigin[0].clear();
mValuesOrigin[1].clear();
mValuesOrigin[2].clear();
// Perform mapping
const auto destination_nodes_begin = mrDestinationModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
NodeVector neighbor_nodes( mMaxNumberOfNeighbors );
std::vector<double> resulting_squared_distances( mMaxNumberOfNeighbors );
unsigned int number_of_neighbors = mpSearchTree->SearchInRadius( node_i,
mFilterRadius,
neighbor_nodes.begin(),
resulting_squared_distances.begin(),
mMaxNumberOfNeighbors );
ThrowWarningIfNumberOfNeighborsExceedsLimit(node_i, number_of_neighbors);
std::vector<double> list_of_weights( number_of_neighbors, 0.0 );
double sum_of_weights = 0.0;
ComputeWeightForAllNeighbors( node_i, neighbor_nodes, number_of_neighbors, list_of_weights, sum_of_weights );
array_3d& nodal_variable = node_i.FastGetSolutionStepValue(rDestinationVariable);
for(unsigned int neighbor_itr = 0 ; neighbor_itr<number_of_neighbors ; neighbor_itr++)
{
ModelPart::NodeType& neighbor_node = *neighbor_nodes[neighbor_itr];
int neighbor_node_mapping_id = neighbor_node.GetValue(MAPPING_ID);
double weight = list_of_weights[neighbor_itr] / sum_of_weights;
// Atomic: different threads may scatter onto the same origin node.
#pragma omp atomic
mValuesOrigin[0][neighbor_node_mapping_id] += weight*nodal_variable[0];
#pragma omp atomic
mValuesOrigin[1][neighbor_node_mapping_id] += weight*nodal_variable[1];
#pragma omp atomic
mValuesOrigin[2][neighbor_node_mapping_id] += weight*nodal_variable[2];
}
}
// Assign results to nodal variable
const auto origin_nodes_begin = mrOriginModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrOriginModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(origin_nodes_begin + node_itr);
int i = node_i.GetValue(MAPPING_ID);
array_3d& r_node_vector = node_i.FastGetSolutionStepValue(rOriginVariable);
r_node_vector(0) = mValuesOrigin[0][i];
r_node_vector(1) = mValuesOrigin[1][i];
r_node_vector(2) = mValuesOrigin[2][i];
}
KRATOS_INFO("ShapeOpt") << "Finished mapping in " << mapping_time.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
// Inverse map (destination -> origin) of a scalar variable; scatter scheme
// as in the vector overload, using only component [0] of the work vectors.
void InverseMap( const Variable<double> &rDestinationVariable, const Variable<double> &rOriginVariable ) override
{
if (mIsMappingInitialized == false)
Initialize();
BuiltinTimer mapping_time;
KRATOS_INFO("") << std::endl;
KRATOS_INFO("ShapeOpt") << "Starting inverse mapping of " << rDestinationVariable.Name() << "..." << std::endl;
// Prepare vectors for mapping
mValuesOrigin[0].clear();
// Perform mapping
const auto destination_nodes_begin = mrDestinationModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrDestinationModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(destination_nodes_begin + node_itr);
NodeVector neighbor_nodes( mMaxNumberOfNeighbors );
std::vector<double> resulting_squared_distances( mMaxNumberOfNeighbors );
unsigned int number_of_neighbors = mpSearchTree->SearchInRadius( node_i,
mFilterRadius,
neighbor_nodes.begin(),
resulting_squared_distances.begin(),
mMaxNumberOfNeighbors );
ThrowWarningIfNumberOfNeighborsExceedsLimit(node_i, number_of_neighbors);
std::vector<double> list_of_weights( number_of_neighbors, 0.0 );
double sum_of_weights = 0.0;
ComputeWeightForAllNeighbors( node_i, neighbor_nodes, number_of_neighbors, list_of_weights, sum_of_weights );
double variable_value = node_i.FastGetSolutionStepValue(rDestinationVariable);
for(unsigned int neighbor_itr = 0 ; neighbor_itr<number_of_neighbors ; neighbor_itr++)
{
ModelPart::NodeType& neighbor_node = *neighbor_nodes[neighbor_itr];
int neighbor_node_mapping_id = neighbor_node.GetValue(MAPPING_ID);
double weight = list_of_weights[neighbor_itr] / sum_of_weights;
// Atomic: different threads may scatter onto the same origin node.
#pragma omp atomic
mValuesOrigin[0][neighbor_node_mapping_id] += weight*variable_value;
}
}
// Assign results to nodal variable
const auto origin_nodes_begin = mrOriginModelPart.NodesBegin();
#pragma omp parallel for
for(int node_itr=0; node_itr < static_cast<int>(mrOriginModelPart.NumberOfNodes()); node_itr++)
{
auto& node_i = *(origin_nodes_begin + node_itr);
int i = node_i.GetValue(MAPPING_ID);
node_i.FastGetSolutionStepValue(rOriginVariable) = mValuesOrigin[0][i];
}
KRATOS_INFO("ShapeOpt") << "Finished mapping in " << mapping_time.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
// Rebuilds the KD-tree after node coordinates changed (e.g. a design
// update). Requires Initialize() to have run first.
void Update() override
{
if (mIsMappingInitialized == false)
KRATOS_ERROR << "Mapping has to be initialized before calling the Update-function!";
BuiltinTimer timer;
KRATOS_INFO("ShapeOpt") << "Starting to update mapper..." << std::endl;
CreateSearchTreeWithAllNodesInOriginModelPart();
KRATOS_INFO("ShapeOpt") << "Finished updating of mapper in " << timer.ElapsedSeconds() << " s." << std::endl;
}
// --------------------------------------------------------------------------
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const override
{
return "MapperVertexMorphingMatrixFree";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "MapperVertexMorphingMatrixFree";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// Initialized by class constructor
ModelPart& mrOriginModelPart;
ModelPart& mrDestinationModelPart;
Parameters mMapperSettings;
double mFilterRadius;
unsigned int mMaxNumberOfNeighbors;
FilterFunction::Pointer mpFilterFunction;
// Variables for spatial search
unsigned int mBucketSize = 100;
NodeVector mListOfNodesInOriginModelPart;
KDTree::Pointer mpSearchTree;
// Variables for mapping
// mValuesOrigin/mValuesDestination hold one Vector per spatial component
// (3 each), indexed by MAPPING_ID; scalar overloads use component [0] only.
std::vector<Vector> mValuesOrigin;
std::vector<Vector> mValuesDestination;
bool mIsMappingInitialized = false;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// --------------------------------------------------------------------------
// Caches pointers to all origin nodes; this list backs the KD-tree.
void CreateListOfNodesInOriginModelPart()
{
mListOfNodesInOriginModelPart.resize(mrOriginModelPart.Nodes().size());
int counter = 0;
for (ModelPart::NodesContainerType::iterator node_it = mrOriginModelPart.NodesBegin(); node_it != mrOriginModelPart.NodesEnd(); ++node_it)
{
NodeTypePointer pnode = *(node_it.base());
mListOfNodesInOriginModelPart[counter++] = pnode;
}
}
// --------------------------------------------------------------------------
// Instantiates the kernel ("filter_function_type") used to weight neighbors.
void CreateFilterFunction()
{
std::string filter_type = mMapperSettings["filter_function_type"].GetString();
double filter_radius = mMapperSettings["filter_radius"].GetDouble();
mpFilterFunction = Kratos::shared_ptr<FilterFunction>(new FilterFunction(filter_type, filter_radius));
}
// --------------------------------------------------------------------------
// Allocates the per-component work vectors (3 x number-of-nodes, zeroed).
void InitializeMappingVariables()
{
const unsigned int origin_node_number = mrOriginModelPart.Nodes().size();
mValuesOrigin.resize(3,ZeroVector(origin_node_number));
const unsigned int destination_node_number = mrDestinationModelPart.Nodes().size();
mValuesDestination.resize(3,ZeroVector(destination_node_number));
}
// --------------------------------------------------------------------------
// Assigns consecutive MAPPING_ID values (0..n-1) in both model parts; these
// ids index the work vectors above.
void AssignMappingIds()
{
unsigned int i = 0;
for(auto& node_i : mrOriginModelPart.Nodes())
node_i.SetValue(MAPPING_ID,i++);
i = 0;
for(auto& node_i : mrDestinationModelPart.Nodes())
node_i.SetValue(MAPPING_ID,i++);
}
// --------------------------------------------------------------------------
// (Re)builds the KD-tree over the cached origin-node list.
void CreateSearchTreeWithAllNodesInOriginModelPart()
{
BuiltinTimer timer;
KRATOS_INFO("ShapeOpt") << "Creating search tree to perform mapping..." << std::endl;
mpSearchTree = Kratos::shared_ptr<KDTree>(new KDTree(mListOfNodesInOriginModelPart.begin(), mListOfNodesInOriginModelPart.end(), mBucketSize));
KRATOS_INFO("ShapeOpt") << "Search tree created in: " << timer.ElapsedSeconds() << " s" << std::endl;
}
// --------------------------------------------------------------------------
// Warns when the neighbor search hit its capacity: further neighbors inside
// the filter radius are silently ignored in that case.
void ThrowWarningIfNumberOfNeighborsExceedsLimit(ModelPart::NodeType& given_node, unsigned int number_of_neighbors)
{
if(number_of_neighbors >= mMaxNumberOfNeighbors)
KRATOS_WARNING("ShapeOpt::MapperVertexMorphingMatrixFree") << "For node " << given_node.Id() << " and specified filter radius, maximum number of neighbor nodes (=" << mMaxNumberOfNeighbors << " nodes) reached!" << std::endl;
}
// --------------------------------------------------------------------------
// Evaluates the filter kernel for each neighbor of design_node; fills
// list_of_weights and accumulates their (unnormalized) sum into
// sum_of_weights. sum_of_weights is NOT reset here -- callers pass 0.0.
void ComputeWeightForAllNeighbors( ModelPart::NodeType& design_node,
NodeVector& neighbor_nodes,
unsigned int number_of_neighbors,
std::vector<double>& list_of_weights,
double& sum_of_weights )
{
for(unsigned int neighbor_itr = 0 ; neighbor_itr<number_of_neighbors ; neighbor_itr++)
{
ModelPart::NodeType& neighbor_node = *neighbor_nodes[neighbor_itr];
double weight = mpFilterFunction->compute_weight( design_node.Coordinates(), neighbor_node.Coordinates() );
list_of_weights[neighbor_itr] = weight;
sum_of_weights += weight;
}
}
// --------------------------------------------------------------------------
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// MapperVertexMorphingMatrixFree& operator=(MapperVertexMorphingMatrixFree const& rOther);
/// Copy constructor.
// MapperVertexMorphingMatrixFree(MapperVertexMorphingMatrixFree const& rOther);
///@}
}; // Class MapperVertexMorphingMatrixFree
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos.
#endif // MAPPER_VERTEX_MORPHING_MATRIX_FREE_H
|
DemBonesExt.h | ///////////////////////////////////////////////////////////////////////////////
// Dem Bones - Skinning Decomposition Library //
// Copyright (c) 2019, Electronic Arts. All rights reserved. //
///////////////////////////////////////////////////////////////////////////////
#ifndef DEM_BONES_EXT
#define DEM_BONES_EXT
#include "DemBones.h"
#include <Eigen/Geometry>
#ifndef DEM_BONES_MAT_BLOCKS
#include "MatBlocks.h"
#define DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#endif
namespace Dem
{
/** @class DemBonesExt DemBonesExt.h "DemBones/DemBonesExt.h"
@brief Extended class to handle hierarchical skeleton with local rotations/translations and bind matrices
@details Call computeRTB() to get local rotations/translations and bind matrices after skinning decomposition is done and other data is set.
@b _Scalar is the floating-point data type. @b _AniMeshScalar is the floating-point data type of mesh sequence #v.
*/
template<class _Scalar, class _AniMeshScalar>
class DemBonesExt: public DemBones<_Scalar, _AniMeshScalar> {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Matrix4=Eigen::Matrix<_Scalar, 4, 4>;
using Matrix3=Eigen::Matrix<_Scalar, 3, 3>;
using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;
using Vector4=Eigen::Matrix<_Scalar, 4, 1>;
using Vector3=Eigen::Matrix<_Scalar, 3, 1>;
using SparseMatrix=Eigen::SparseMatrix<_Scalar>;
using Triplet=Eigen::Triplet<_Scalar>;
using DemBones<_Scalar, _AniMeshScalar>::nIters;
using DemBones<_Scalar, _AniMeshScalar>::nInitIters;
using DemBones<_Scalar, _AniMeshScalar>::nTransIters;
using DemBones<_Scalar, _AniMeshScalar>::transAffine;
using DemBones<_Scalar, _AniMeshScalar>::transAffineNorm;
using DemBones<_Scalar, _AniMeshScalar>::nWeightsIters;
using DemBones<_Scalar, _AniMeshScalar>::nnz;
using DemBones<_Scalar, _AniMeshScalar>::weightsSmooth;
using DemBones<_Scalar, _AniMeshScalar>::weightsSmoothStep;
using DemBones<_Scalar, _AniMeshScalar>::weightEps;
using DemBones<_Scalar, _AniMeshScalar>::nV;
using DemBones<_Scalar, _AniMeshScalar>::nB;
using DemBones<_Scalar, _AniMeshScalar>::nS;
using DemBones<_Scalar, _AniMeshScalar>::nF;
using DemBones<_Scalar, _AniMeshScalar>::fStart;
using DemBones<_Scalar, _AniMeshScalar>::subjectID;
using DemBones<_Scalar, _AniMeshScalar>::u;
using DemBones<_Scalar, _AniMeshScalar>::w;
using DemBones<_Scalar, _AniMeshScalar>::lockW;
using DemBones<_Scalar, _AniMeshScalar>::m;
using DemBones<_Scalar, _AniMeshScalar>::lockM;
using DemBones<_Scalar, _AniMeshScalar>::v;
using DemBones<_Scalar, _AniMeshScalar>::fv;
using DemBones<_Scalar, _AniMeshScalar>::iter;
using DemBones<_Scalar, _AniMeshScalar>::iterTransformations;
using DemBones<_Scalar, _AniMeshScalar>::iterWeights;
//! Timestamps for bone transformations #m, [@c size] = #nS, #fTime(@p k) is the timestamp of frame @p k
Eigen::VectorXd fTime;
//! Name of bones, [@c size] = #nB, #boneName(@p j) is the name of bone @p j
std::vector<std::string> boneName;
//! Parent bone index, [@c size] = #nB, #parent(@p j) is the index of parent bone of @p j, #parent(@p j) = -1 if @p j has no parent.
Eigen::VectorXi parent;
//! Original bind pre-matrix, [@c size] = [4*#nS, 4*#nB], #bind.@a block(4*@p s, 4*@p j, 4, 4) is the global bind matrix of bone @p j on subject @p s at the rest pose
MatrixX bind;
//! Inverse pre-multiplication matrices, [@c size] = [4*#nS, 4*#nB], #preMulInv.@a block(4*@p s, 4*@p j, 4, 4) is the inverse of pre-local transformation of bone @p j on subject @p s
MatrixX preMulInv;
//! Rotation order, [@c size] = [3*#nS, #nB], #rotOrder.@a col(@p j).@a segment<3>(3*@p s) is the rotation order of bone @p j on subject @p s, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
Eigen::MatrixXi rotOrder;
//! Orientations of bones, [@c size] = [3*#nS, #nB], @p orient.@a col(@p j).@a segment<3>(3*@p s) is the(@c rx, @c ry, @c rz) orientation of bone @p j in degree
MatrixX orient;
//! Bind transformation update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity, 2=do 1 and group joints
int bindUpdate;
/** @brief Constructor and setting default parameters
*/
DemBonesExt(): bindUpdate(0) {
clear();
}
/** @brief Clear all data
*/
void clear() {
fTime.resize(0);
boneName.resize(0);
parent.resize(0);
bind.resize(0, 0);
preMulInv.resize(0, 0);
rotOrder.resize(0, 0);
orient.resize(0, 0);
DemBones<_Scalar, _AniMeshScalar>::clear();
}
/** @brief Local rotations, translations and global bind matrices of a subject
@details Required all data in the base class: #u, #fv, #nV, #v, #nF, #fStart, #subjectID, #nS, #m, #w, #nB
This function will initialize missing attributes:
- #parent: -1 vector (if no joint grouping) or parent to a root, [@c size] = #nB
- #preMulInv: 4*4 identity matrix blocks, [@c size] = [4*#nS, 4*#nB]
- #rotOrder: {0, 1, 2} vector blocks, [@c size] = [3*#nS, #nB]
- #orient: 0 matrix, [@c size] = [3*#nS, #nB]
@param[in] s is the subject index
@param[out] lr is the [3*@p nFr, #nB] by-reference output local rotations, @p lr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j at frame @p k
@param[out] lt is the [3*@p nFr, #nB] by-reference output local translations, @p lt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j at frame @p k
@param[out] gb is the [4, 4*#nB] by-reference output global bind matrices, @p gb.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone j
@param[out] lbr is the [3, #nB] by-reference output local rotations at bind pose @p lbr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j
@param[out] lbt is the [3, #nB] by-reference output local translations at bind pose, @p lbt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j
@param[in] degreeRot=true will output rotations in degree, otherwise output in radian
*/
void computeRTB(int s, MatrixX& lr, MatrixX& lt, MatrixX& gb, MatrixX& lbr, MatrixX& lbt, bool degreeRot=true) {
computeBind(s, gb);
if (parent.size()==0) {
if (bindUpdate==2) {
int root=computeRoot();
parent=Eigen::VectorXi::Constant(nB, root);
parent(root)=-1;
} else parent=Eigen::VectorXi::Constant(nB, -1);
}
if (preMulInv.size()==0) preMulInv=MatrixX::Identity(4, 4).replicate(nS, nB);
if (rotOrder.size()==0) rotOrder=Eigen::Vector3i(0, 1, 2).replicate(nS, nB);
if (orient.size()==0) orient=MatrixX::Zero(3*nS, nB);
int nFs=fStart(s+1)-fStart(s);
lr.resize(nFs*3, nB);
lt.resize(nFs*3, nB);
lbr.resize(3, nB);
lbt.resize(3, nB);
// Fix: removed the dead "MatrixX lm(4*nFs, 4*nB);" that was declared here.
// It allocated a potentially large scratch matrix on every call and was
// never read: all uses below refer to the per-frame Matrix4 lm declared
// inside the loop, which shadowed it.
#pragma omp parallel for
for (int j=0; j<nB; j++) {
Eigen::Vector3i ro=rotOrder.col(j).template segment<3>(s*3);
// Bone orientation (degrees -> radians), composed in rotation order ro.
Vector3 ov=orient.vec3(s, j)*EIGEN_PI/180;
Matrix3 invOM=Matrix3(Eigen::AngleAxis<_Scalar>(ov(ro(2)), Vector3::Unit(ro(2))))*
Eigen::AngleAxis<_Scalar>(ov(ro(1)), Vector3::Unit(ro(1)))*
Eigen::AngleAxis<_Scalar>(ov(ro(0)), Vector3::Unit(ro(0)));
invOM.transposeInPlace();
// Local bind transform of bone j relative to its parent (if any).
Matrix4 lb;
if (parent(j)==-1) lb=preMulInv.blk4(s, j)*gb.blk4(0, j);
else lb=preMulInv.blk4(s, j)*gb.blk4(0, parent(j)).inverse()*gb.blk4(0, j);
Vector3 curRot=Vector3::Zero();
toRot(invOM*lb.template topLeftCorner<3, 3>(), curRot, ro);
lbr.col(j)=curRot;
lbt.col(j)=lb.template topRightCorner<3, 1>();
// Per-frame local transform; curRot carries over between frames so that
// toRot picks Euler angles continuous with the previous frame.
Matrix4 lm;
for (int k=0; k<nFs; k++) {
if (parent(j)==-1) lm=preMulInv.blk4(s, j)*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
else lm=preMulInv.blk4(s, j)*(m.blk4(k+fStart(s), parent(j))*gb.blk4(0, parent(j))).inverse()*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
toRot(invOM*lm.template topLeftCorner<3, 3>(), curRot, ro);
lr.vec3(k, j)=curRot;
lt.vec3(k, j)=lm.template topRightCorner<3, 1>();
}
}
if (degreeRot) {
lr*=180/EIGEN_PI;
lbr*=180/EIGEN_PI;
}
}
private:
/** p-norm centroids (using #transAffineNorm) and rotations to identity
@param s is the subject index
@param b is the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
*/
void computeCentroids(int s, MatrixX& b) {
MatrixX c=MatrixX::Zero(4, nB);
// Weighted homogeneous sum of rest-pose vertices per bone.
for (int i=0; i<nV; i++)
for (typename SparseMatrix::InnerIterator it(w, i); it; ++it)
c.col(it.row())+=pow(it.value(), transAffineNorm)*u.vec3(s, i).homogeneous();
// Normalize; skip bones with zero total weight or locked transforms.
for (int j=0; j<nB; j++)
if ((c(3, j)!=0)&&(lockM(j)==0)) b.transVec(0, j)=c.col(j).template head<3>()/c(3, j);
}
/** Global bind pose
@param s is the subject index
@param bindUpdate is the type of bind pose update, 0=keep original, 1 or 2=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity
@param b is the the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
*/
void computeBind(int s, MatrixX& b) {
// Lazily initialize #bind with identity rotations + centroid translations.
if (bind.size()==0) {
lockM=Eigen::VectorXi::Zero(nB);
bind.resize(nS*4, nB*4);
for (int k=0; k<nS; k++) {
b=MatrixX::Identity(4, 4).replicate(1, nB);
computeCentroids(k, b);
bind.block(4*k, 0, 4, 4*nB)=b;
}
}
b=bind.block(4*s, 0, 4, 4*nB);
if (bindUpdate>=1) computeCentroids(s, b);
}
/** Root joint: the bone whose global transform alone best reproduces the
whole animated mesh (minimum total squared error over all frames/vertices).
*/
int computeRoot() {
VectorX err(nB);
#pragma omp parallel for
for (int j=0; j<nB; j++) {
double ej=0;
for (int i=0; i<nV; i++)
for (int k=0; k<nF; k++) ej+=(m.rotMat(k, j)*u.vec3(subjectID(k), i)+m.transVec(k, j)-v.vec3(k, i).template cast<_Scalar>()).squaredNorm();
err(j)=ej;
}
int rj;
err.minCoeff(&rj);
return rj;
}
/** Euler angles from rotation matrix
@param rMat is the 3*3 rotation matrix
@param curRot is the input current Euler angles, it is also the by-reference output closet Euler angles correspond to @p rMat
@param ro is the rotation order, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
@param eps is the epsilon
*/
void toRot(const Matrix3& rMat, Vector3& curRot, const Eigen::Vector3i& ro, _Scalar eps=_Scalar(1e-10)) {
Vector3 r0=rMat.eulerAngles(ro(2), ro(1), ro(0)).reverse();
_Scalar gMin=(r0-curRot).squaredNorm();
Vector3 rMin=r0;
Vector3 r;
Matrix3 tmpMat;
// Search sign flips and +/-2pi shifts of each angle for the equivalent
// Euler triple closest to curRot (keeps curves continuous across frames).
for (int fx=-1; fx<=1; fx+=2)
for (_Scalar sx=-2*EIGEN_PI; sx<2.1*EIGEN_PI; sx+=EIGEN_PI) {
r(0)=fx*r0(0)+sx;
for (int fy=-1; fy<=1; fy+=2)
for (_Scalar sy=-2*EIGEN_PI; sy<2.1*EIGEN_PI; sy+=EIGEN_PI) {
r(1)=fy*r0(1)+sy;
for (int fz=-1; fz<=1; fz+=2)
for (_Scalar sz=-2*EIGEN_PI; sz<2.1*EIGEN_PI; sz+=EIGEN_PI) {
r(2)=fz*r0(2)+sz;
tmpMat=Matrix3(Eigen::AngleAxis<_Scalar>(r(ro(2)), Vector3::Unit(ro(2))))*
Eigen::AngleAxis<_Scalar>(r(ro(1)), Vector3::Unit(ro(1)))*
Eigen::AngleAxis<_Scalar>(r(ro(0)), Vector3::Unit(ro(0)));
if ((tmpMat-rMat).squaredNorm()<eps) {
_Scalar tmp=(r-curRot).squaredNorm();
if (tmp<gMin) {
gMin=tmp;
rMin=r;
}
}
}
}
}
curRot=rMin;
}
};
}
#ifdef DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#undef blk4
#undef rotMat
#undef transVec
#undef vec3
#undef DEM_BONES_MAT_BLOCKS
#endif
#undef rotMatFromEuler
#endif
|
GB_unop__tan_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tan_fc64_fc64)
// op(A') function: GB (_unop_tran__tan_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ctan (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctan (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = ctan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TAN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ctan (Ax [p]) for all entries p; auto-generated kernel, do not edit.
GrB_Info GB (_unop_apply__tan_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is sparse, hypersparse, or full: apply the op to all anz values
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = ctan (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = ctan (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ctan (A'): transpose and apply; the real work is in the shared
// template GB_unop_transpose.c, parameterized by the GB_* macros above.
GrB_Info GB (_unop_tran__tan_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a normalized timeval.
 * Follows the classic glibc-manual example: y is normalized IN PLACE so
 * that the microsecond difference comes out non-negative.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry the other way when the gap exceeds one second
     * (only reachable for non-normalized inputs). */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Overall difference is negative exactly when x's seconds fell below y's. */
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the grids, runs the order-8 (25-point) wave stencil
 * TESTS times, reports the best wall-clock time, and frees everything. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four extents are mandatory: the original read Nx/Ny/Nz (argc<=3)
   * or Nt (argc<=4) uninitialized, which is undefined behavior. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8;  /* +8 = 4-deep halo on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  /* Two time levels of the field A plus the coefficient field roc2.
   * (The original malloc'd a one-element placeholder for roc2 and then
   * immediately overwrote the pointer, leaking the placeholder.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize EVERY grid point: the original loops started at 1, leaving
   * plane/row/column 0 uninitialized even though the stencil reads them
   * through the halo (i-4 reaches 0 when i==4). Also give A[1] a defined
   * value, since the kernel reads A[(t+1)%2] before its first write. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-8 central-difference coefficients of the 25-point stencil. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (the original leaked tile_size and A itself). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  (void) ts_return;  /* timing status retained for parity with the original */
  return 0;
}
|
GB_binop__rdiv_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint8)
// A*D function (colscale): GB (_AxD__rdiv_uint8)
// D*A function (rowscale): GB (_DxB__rdiv_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint8)
// C=scalar+B GB (_bind1st__rdiv_uint8)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint8)
// C=A+scalar GB (_bind2nd__rdiv_uint8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B, all three matrices dense; body supplied by the shared template
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B, all three matrices dense; body supplied by the shared template
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C
GrB_Info GB (_Cdense_accumB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into the dense matrix C
GrB_Info GB (_Cdense_accumb__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above already returned (generator artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D
GrB_Info GB (_AxD__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D
GrB_Info GB (_DxB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B with optional mask M (possibly complemented)
GrB_Info GB (_AaddB__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
// eWiseUnion supplies explicit alpha/beta scalars to use where an entry is
// present in only one input; plain eWiseAdd leaves them unused
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C sparse/hyper, with optional (possibly complemented) mask
GrB_Info GB (_AemultB_08__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full
GrB_Info GB (_AemultB_02__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, both A and B bitmap/full
GrB_Info GB (_AemultB_04__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C, with optional (possibly complemented) mask
GrB_Info GB (_AemultB_bitmap__rdiv_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x, with scalar x bound as 1st argument
GrB_Info GB (_bind1st__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions absent from B's bitmap (GBB is true for non-bitmap B)
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y) = y / Ax [p], with scalar y bound as 2nd argument
GrB_Info GB (_bind2nd__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions absent from A's bitmap (GBB is true for non-bitmap A)
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
// C = rdiv (x, A'): transpose A and apply with scalar x bound as 1st argument
GrB_Info GB (_bind1st_tran__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
// C = rdiv (A', y): transpose A and apply with scalar y bound as 2nd argument
GrB_Info GB (_bind2nd_tran__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
a.2.1.c | /* { dg-do run } */
#include <stdio.h>
#include <omp.h>
extern void abort (void);
/* OpenMP testsuite case: thread 0 repeatedly writes x while thread 1 reads
   it concurrently (an intentional data race whose observed values are still
   constrained to 2 or 5); after the barrier both threads must see x == 5. */
int
main ()
{
int bad, x;
x = 2;
bad = 0;
#pragma omp parallel num_threads(2) shared(x, bad)
{
if (omp_get_thread_num () == 0)
{
/* volatile keeps the store loop from being optimized away */
volatile int i;
for (i = 0; i < 100000000; i++)
x = 5;
}
else
{
/* Print 1: the following read of x has a race */
if (x != 2 && x != 5)
bad = 1;
}
/* barrier orders all of thread 0's stores before the checks below */
#pragma omp barrier
if (omp_get_thread_num () == 0)
{
/* x must be 5 now. */
if (x != 5)
bad = 1;
}
else
{
/* x must be 5 now. */
if (x != 5)
bad = 1;
}
}
if (bad)
abort ();
return 0;
}
|
create_SNN_graph2_omp.c | // Author: Fabio Rodrigues Pereira
// E-mail: fabior@uio.no
#include <stdlib.h> // rand, malloc, calloc and free.
#include <stdio.h> // printf
#include <omp.h>
/* Compute the shared-nearest-neighbour count for every edge z of a CSR
 * graph: (*SNN_val)[z] = |neighbours(row of z) ∩ neighbours(col_idx[z])|,
 * counted by comparing the two adjacency lists pairwise.
 *
 * N       : number of nodes
 * row_ptr : CSR row pointer, length N+1
 * col_idx : CSR column indices, length row_ptr[N]
 * SNN_val : by-reference output array of length row_ptr[N] (caller frees);
 *           left NULL if allocation fails
 */
void create_SNN_graph2_omp(int N, int *row_ptr, int *col_idx, int **SNN_val)
{
    // allocating SNN_val that has the same length of col_idx
    (*SNN_val) = calloc(row_ptr[N], sizeof **SNN_val);
    if (*SNN_val == NULL)
        return; /* original dereferenced a NULL result on allocation failure */

    /* Every iteration is independent and all scratch variables are declared
     * inside the loop body, so no private()/reduction() clauses are needed.
     * (The original's reduction(+:count) mislabeled a per-iteration scratch
     * counter as a reduction variable.) */
    #pragma omp parallel for
    for (int z = 0; z < row_ptr[N]; z++)
    {
        /* Linear scan of row_ptr to find the row owning edge z; the break is
         * guaranteed to fire because z < row_ptr[N]. */
        int row_nr = 0;
        for (int x = 0; x < N + 1; x++)
        {
            if (z < row_ptr[x])
            {
                row_nr = x - 1;
                break;
            }
        }

        /* Count matches between the neighbour lists of the two endpoints. */
        unsigned long count = 0;
        for (int i = row_ptr[row_nr]; i < row_ptr[row_nr + 1]; i++)
        {
            for (int j = row_ptr[col_idx[z]]; j < row_ptr[col_idx[z] + 1]; j++)
                if (col_idx[i] == col_idx[j])
                    count += 1;
        }
        (*SNN_val)[z] = count;
    }
}
|
/* A[i] = i + T[0] + ... + T[M-1] for every i in [0, N), parallelized over i.
   T and A are declared restrict: they must not overlap. */
void bar(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (int row = 0; row < N; ++row) {
      /* Accumulate locally, then store once. */
      int acc = row;
      for (int col = 0; col < M; ++col)
        acc += T[col];
      A[row] = acc;
    }
  }
}
/* Build a small table T = {0, 1, 2, 3} and pass it to bar() together with
   the caller-supplied array A of length N. */
void foo(int N, int *A) {
  int TSize = 4;
  int T[4];
  for (int idx = 0; idx < TSize; ++idx)
    T[idx] = idx;
#pragma spf region
  { bar(TSize, T, N, A); }
}
|
residualbased_newton_raphson_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_python/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ProcessFactoryUtility::Pointer ProcessesListType;
typedef std::size_t IndexType;
/**
* @brief Constructor with convergence criteria and builder-and-solver
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
* @param ThisParameters Configuration parameters, validated against GetDefaultParameters()
* @param pMyProcesses The list of processes executed during the solution loop
* @param pPostProcesses The list of post-processing processes
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
// Mirror the convergence criteria's verbosity for this strategy's output
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* @brief Constructor with linear solver and convergence criteria
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
* @param ThisParameters Configuration parameters, validated against GetDefaultParameters()
* @param pMyProcesses The list of processes executed during the solution loop
* @param pPostProcesses The list of post-processing processes
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
// Mirror the convergence criteria's verbosity for this strategy's output
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* @brief Constructor with linear solver, convergence criteria and builder-and-solver
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
* @param ThisParameters Configuration parameters, validated against GetDefaultParameters()
* @param pMyProcesses The list of processes executed during the solution loop
* @param pPostProcesses The list of post-processing processes
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
// Mirror the convergence criteria's verbosity for this strategy's output
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* Destructor. Defaulted: all members are value types or shared pointers,
* so no manual cleanup is required.
*/
~ResidualBasedNewtonRaphsonContactStrategy() override
= default;
//******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
//***********************************************************************************//
/**
 * @brief Operation to predict the solution for the contact problem.
 * @details Resets the weighted gap (and the weighted slip for frictional
 * problems), computes the current gap explicitly, and advances the node
 * coordinates with the full displacement (first step) or the displacement
 * increment (subsequent steps). If the contact variables are not present
 * a trivial (no-op) prediction is used.
 */
void Predict() override
{
    KRATOS_TRY

    // Auxiliary zero array used to reset the weighted slip
    const array_1d<double, 3> zero_array = ZeroVector(3);

    // Set to zero the weighted gap
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
    const bool frictional = r_model_part.Is(SLIP);

    // We predict contact pressure in case of contact problem
    // NOTE: guard against an empty node container before dereferencing begin()
    if (nodes_array.size() > 0 && nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
        VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
        if (frictional) {
            VariableUtils().SetVariable(WEIGHTED_SLIP, zero_array, nodes_array);
        }

        // Compute the current gap
        ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));

        // We predict a contact pressure
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
        const std::size_t step = r_process_info[STEP];
        if (step == 1) {
            // First step: move the nodes with the full displacement
            #pragma omp parallel for
            for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                auto it_node = nodes_array.begin() + i;
                noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
            }
        } else {
            // Later steps: move the nodes with the displacement increment
            #pragma omp parallel for
            for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                auto it_node = nodes_array.begin() + i;
                noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
            }
        }
    }

    // BaseType::Predict(); // NOTE: May cause problems in dynamics!!!

    KRATOS_CATCH("")
}
/**
 * @brief Initialization of member variables and prior operations
 * @details Delegates to the base strategy, resets the finalize flag and
 * restarts the non-linear iteration counter.
 */
void Initialize() override
{
    KRATOS_TRY;

    BaseType::Initialize();
    mFinalizeWasPerformed = false;

    // Restart the non-linear iteration counter
    StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] = 1;

    KRATOS_CATCH("");
}
/**
 * @brief The problem of interest is solved.
 * @details This function calls sequentially: Initialize(), InitializeSolutionStep(),
 * Predict(), SolveSolutionStep() and FinalizeSolutionStep().
 * All those functions can otherwise be called separately.
 * @return Always 0.0 (this strategy does not report a residual norm here)
 */
double Solve() override
{
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
this->SolveSolutionStep();
this->FinalizeSolutionStep();
// TODO: Add something if necessary
return 0.0;
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * before solving the solution step.
 * @details The convergence-criteria output is silenced during the base
 * initialization and the cached echo level is restored afterwards.
 */
void InitializeSolutionStep() override
{
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
BaseType::InitializeSolutionStep();
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
// A new step may be finalized again
mFinalizeWasPerformed = false;
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * after solving the solution step.
 * @details Guarded by mFinalizeWasPerformed so the base finalization runs at
 * most once per step.
 */
void FinalizeSolutionStep() override
{
    KRATOS_TRY;

    // Early-out: the step was already finalized
    if (mFinalizeWasPerformed)
        return;

    BaseType::FinalizeSolutionStep();
    mFinalizeWasPerformed = true; // Avoid computing the finalization twice

    KRATOS_CATCH("");
}
/**
 * @brief Solves the current step.
 * @details Without the INTERACTION flag, an outer "simplified semi-smooth"
 * loop repeats BaseSolveSolutionStep() up to "inner_loop_iterations" times,
 * re-evaluating PostCriteria after each pass; otherwise a single base loop is
 * run. When "adaptative_strategy" is enabled, a failed step triggers
 * AdaptativeStep() (time-step splitting).
 * @return True if a solution has been found, false otherwise
 */
bool SolveSolutionStep() override
{
KRATOS_TRY;
// bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
// bool is_converged = BaseSolveSolutionStep(); // Direct solution
bool is_converged = false;
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
if (r_model_part.IsNot(INTERACTION)) {
// We get the system
TSystemMatrixType& A = *BaseType::mpA;
TSystemVectorType& Dx = *BaseType::mpDx;
TSystemVectorType& b = *BaseType::mpb;
// We get the process info
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
int inner_iteration = 0;
// Outer loop: repeat the full Newton-Raphson solve until convergence or
// until "inner_loop_iterations" is exhausted
while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
++inner_iteration;
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;;
}
// We solve one loop
r_process_info[NL_ITERATION_NUMBER] = 1;
r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
is_converged = BaseSolveSolutionStep();
// We check the convergence (criteria output silenced, then restored)
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
// We compute the base loop
r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
is_converged = BaseSolveSolutionStep();
}
// Fall back to time-step splitting if requested and not converged
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (!is_converged) {
is_converged = AdaptativeStep();
}
}
return is_converged;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
// ADAPTATIVE STRATEGY PARAMETERS
bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed
ProcessesListType mpMyProcesses; /// The processes list
ProcessesListType mpPostProcesses; /// The post processes list
// OTHER PARAMETERS
int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria
///@}
///@name Protected Operators
///@{
/**
 * @brief Solves the current step with a full Newton-Raphson loop.
 * @details Mirrors the base strategy loop, adding optional geometry checks
 * ("adaptative_strategy") that abort the step and revert STEP by one when an
 * inverted element is detected.
 * @return True if a solution has been found, false otherwise
 */
bool BaseSolveSolutionStep()
{
KRATOS_TRY;
// Pointers needed in the solution
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// Initializing the parameters of the Newton-Raphson cicle
IndexType iteration_number = 1;
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
// We do a geometry check before solve the system for first time
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
// Function to perform the building and the solving phase.
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
// LHS is kept: rebuild only the RHS and solve
TSparseSpace::SetToZero(rDx); //Dx=0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
if (is_converged) {
// Initialisation of the convergence criteria
BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb);
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
// Iteration Cicle... performed only for NonLinearProblems
while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
//setting the number of iteration
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
//call the linear system solver to find the correction mDx for the
//it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
//std::cout << "mb is calculated" << std::endl;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
}
// Plots a warning if the maximum number of iterations is exceeded
if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
MaxIterationsExceeded();
// Recalculate residual if needed
// (note that some convergence criteria need it to be recalculated)
if (residual_is_updated == false) {
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
KRATOS_CATCH("");
}
/**
 * @brief This method performs the adaptative step
 * @details When the regular solve fails, the time step is split by
 * "split_factor" (up to "max_number_splits" times) and the sub-steps are
 * solved one by one until the target time is reached or the splits are
 * exhausted. The original DELTA_TIME is restored before returning.
 * @return True if the split solve converged, false otherwise
 */
bool AdaptativeStep()
{
KRATOS_TRY;
bool is_converged = false;
// Plots a warning if the maximum number of iterations is exceeded
if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;
if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later
int split_number = 0;
// We iterate until we reach the convergence or we split more than desired
while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
// Expliting time step as a way to try improve the convergence
split_number += 1;
double aux_delta_time, current_time;
const double aux_time = SplitTimeStep(aux_delta_time, current_time);
current_time += aux_delta_time;
bool inside_the_split_is_converged = false;
IndexType inner_iteration = 0;
while (current_time <= aux_time) {
inner_iteration += 1;
r_process_info[STEP] += 1;
if (inner_iteration == 1) {
// First sub-step: rewind the mesh and overwrite the last buffer entry
if (StrategyBaseType::MoveMeshFlag())
UnMoveMesh();
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
it_node->OverwriteSolutionStepData(1, 0);
// it_node->OverwriteSolutionStepData(2, 1);
}
r_process_info.SetCurrentTime(current_time); // Reduces the time step
FinalizeSolutionStep();
} else {
// Subsequent sub-steps: advance the database buffer to the new time
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
(nodes_array.begin() + i)->CloneSolutionStepData();
r_process_info.CloneSolutionStepInfo();
r_process_info.ClearHistory(r_model_part.GetBufferSize());
r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
}
// We execute the processes before the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteInitializeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteInitializeSolutionStep();
// In order to initialize again everything
BaseType::mInitializeWasPerformed = false;
mFinalizeWasPerformed = false;
// We repeat the solve with the new DELTA_TIME
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
inside_the_split_is_converged = BaseType::SolveSolutionStep();
this->FinalizeSolutionStep();
// We execute the processes after the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteFinalizeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteFinalizeSolutionStep();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteBeforeOutputStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->PrintOutput();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteAfterOutputStep();
current_time += aux_delta_time;
}
if (inside_the_split_is_converged)
is_converged = true;
}
// Plots a warning if the maximum number of iterations and splits are exceeded
if (is_converged == false)
MaxIterationsAndSplitsExceeded();
// Restoring original DELTA_TIME
r_process_info[DELTA_TIME] = original_delta_time;
return is_converged;
KRATOS_CATCH("");
}
/**
 * @brief Here the database is updated
 * @param A The LHS matrix
 * @param Dx The increment of solution after solving system
 * @param b The RHS vector
 * @param MoveMesh The flag that tells if the mesh should be moved
 */
void UpdateDatabase(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
const bool MoveMesh
) override
{
// Simple pass-through to the base strategy update
BaseType::UpdateDatabase(A,Dx,b,MoveMesh);
// TODO: Add something if necessary
}
/**
 * @brief This method checks if some element is inverted
 * @details An element counts as inverted when the determinant of its jacobian
 * at integration point 0 is negative, or when the determinant of the
 * deformation gradient at any integration point is negative.
 * @return True as soon as one inverted element is found, false otherwise
 */
bool CheckGeometryInverted()
{
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    ElementsArrayType& elements_array = r_model_part.Elements();

    // NOT OMP: we return as soon as the first inverted element is detected
    for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
        auto it_elem = elements_array.begin() + i;
        auto& geom = it_elem->GetGeometry();

        // Geometry check: negative jacobian determinant
        if (geom.DeterminantOfJacobian(0) < 0.0) {
            if (mConvergenceCriteriaEchoLevel > 0) {
                KRATOS_WATCH(it_elem->Id())
                KRATOS_WATCH(geom.DeterminantOfJacobian(0))
            }
            return true;
        }

        // We check now the deformation gradient
        std::vector<Matrix> deformation_gradient_matrices;
        it_elem->CalculateOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);
        for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
            const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]);
            if (det_f < 0.0) {
                if (mConvergenceCriteriaEchoLevel > 0) {
                    KRATOS_WATCH(it_elem->Id())
                    KRATOS_WATCH(det_f)
                }
                return true;
            }
        }
    }

    // No inverted element found (previous code kept an always-false local
    // `inverted_element` for this; removed as dead state)
    return false;
}
/**
* @brief Here the time step is splitted
* @param AuxDeltaTime The new delta time to be considered
* @param CurrentTime The current time
* @return The destination time
*/
double SplitTimeStep(
double& AuxDeltaTime,
double& CurrentTime
)
{
KRATOS_TRY;
const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
CurrentTime = aux_time - AuxDeltaTime;
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time
CoutSplittingTime(AuxDeltaTime, aux_time);
return aux_time;
KRATOS_CATCH("");
}
/**
* This method moves bak the mesh to the previous position
*/
void UnMoveMesh()
{
KRATOS_TRY;
if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
}
KRATOS_CATCH("");
}
/**
 * @brief This method returns the default parameters in order to avoid code duplication
 * @return Returns the default parameters
 */
Parameters GetDefaultParameters() const override
{
    // Return the defaults directly; no intermediate variable needed
    return Parameters(R"(
{
"adaptative_strategy" : false,
"split_factor" : 10.0,
"max_number_splits" : 3,
"inner_loop_iterations" : 5
})" );
}
/**
* @brief This method prints information after solving the problem
*/
void CoutSolvingProblem()
{
if (mConvergenceCriteriaEchoLevel != 0) {
std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
}
}
/**
 * @brief This method prints information after split the increment of time
 * @param AuxDeltaTime The new time step to be considered
 * @param AuxTime The destination time
 */
void CoutSplittingTime(
    const double AuxDeltaTime,
    const double AuxTime
    )
{
    // Only rank 0 prints, and only when the criteria echo level asks for it
    if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
        const double restored_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
        std::cout.precision(4);
        std::cout << "|----------------------------------------------------|" << std::endl
                  << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl
                  << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << restored_time << " |" << std::endl
                  << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl
                  << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl
                  << "|----------------------------------------------------|" << std::endl;
    }
}
/**
 * @brief This method prints information after reaching the max number of iterations
 */
void MaxIterationsExceeded() override
{
    // Only rank 0 prints, and only when the criteria echo level asks for it
    if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
        std::cout << "|----------------------------------------------------|" << std::endl
                  << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl
                  << "|----------------------------------------------------|" << std::endl;
    }
}
/**
 * @brief This method prints information after reaching the max number of iterations and splits
 */
void MaxIterationsAndSplitsExceeded()
{
    // Only rank 0 prints, and only when the criteria echo level asks for it
    if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
        std::cout << "|----------------------------------------------------|" << std::endl
                  << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl
                  << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl
                  << "|----------------------------------------------------|" << std::endl;
    }
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
 * Copy constructor.
 * NOTE(review): the body is empty, so the base strategy and all members are
 * default-initialized instead of copied (e.g. mConvergenceCriteriaEchoLevel
 * stays uninitialized). Confirm whether copying should be implemented or the
 * copy constructor deleted.
 */
ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
{
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
|
ParallelOpenMP.h | #pragma once
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
/**
 * parallel_for: applies f(chunk_begin, chunk_end) over [begin, end), split
 * into one contiguous chunk per OpenMP thread (the number of chunks is also
 * limited by grain_size when it is positive). Without OpenMP, f is invoked
 * once over the whole range. The first exception raised inside a task is
 * captured via an atomic flag and rethrown on the calling thread.
 */
template <class F>
inline void parallel_for(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const F& f) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
// Empty range: nothing to do
if (begin >= end) {
return;
}
#ifdef _OPENMP
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
// Work around memory leak when using 1 thread in nested "omp parallel"
// caused by some buggy OpenMP versions and the fact that omp_in_parallel()
// returns false when omp_get_max_threads() == 1 inside nested "omp parallel"
// See issue gh-32284
#pragma omp parallel if (omp_get_max_threads() > 1 && !omp_in_parallel() && ((end - begin) > grain_size))
{
// choose number of tasks based on grain size and number of threads
// can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
int64_t num_threads = omp_get_num_threads();
if (grain_size > 0) {
num_threads = std::min(num_threads, divup((end - begin), grain_size));
}
int64_t tid = omp_get_thread_num();
int64_t chunk_size = divup((end - begin), num_threads);
int64_t begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
try {
f(begin_tid, std::min(end, chunk_size + begin_tid));
} catch (...) {
// Only the first failing task records its exception
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
}
// Rethrow on the calling thread after the parallel region joins
if (eptr) {
std::rethrow_exception(eptr);
}
#else
f(begin, end);
#endif
}
/**
 * parallel_reduce: reduces f over [begin, end) in grain_size-sized chunks
 * computed in parallel (each seeded with ident), then combines the partial
 * results sequentially with sf. Falls back to a single serial f call when
 * already inside a parallel region or only one thread is available.
 */
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const scalar_t ident,
const F& f,
const SF& sf) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
if (begin >= end) {
return ident;
} else if (in_parallel_region() || get_num_threads() == 1) {
// Serial fallback: avoid nested parallelism
return f(begin, end, ident);
} else {
// One partial result per grain-sized chunk
const int64_t num_results = divup((end - begin), grain_size);
std::vector<scalar_t> results(num_results);
scalar_t* results_data = results.data();
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
for (int64_t id = 0; id < num_results; id++) {
int64_t i = begin + id * grain_size;
try {
// The final chunk may be shorter than grain_size
results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
} catch (...) {
// Only the first failing chunk records its exception
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
// Sequential combination of the partial results
scalar_t result = ident;
for (auto partial_result : results) {
result = sf(result, partial_result);
}
return result;
}
}
} // namespace at
|
heisenberg_hamiltonian_mpi.h | /*****************************************************************************
*
* Rokko: Integrated Interface for libraries of eigenvalue decomposition
*
* Copyright (C) 2012-2019 Rokko Developers https://github.com/t-sakashita/rokko
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
*****************************************************************************/
#ifndef ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
#define ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
#include "mpi.h"
/*
 * Applies the Heisenberg Hamiltonian bond terms (pairs lattice_first[l],
 * lattice_second[l]) to the distributed vector v, accumulating into w.
 * The 2^L basis states are split over nproc = 2^p MPI ranks, each owning
 * N = 2^(L-p) local amplitudes: site indices < L-p are addressed by local
 * bits of k, the remaining p sites by bits of the rank id. Bonds touching
 * rank bits exchange the partner block through `buffer` via MPI_Sendrecv.
 * Aborts if nproc is not a power of two.
 */
void multiply(const MPI_Comm comm, int L, int lattice_size, int lattice_first[], int lattice_second[], const double* v, double* w, double* buffer) {
int myrank, nproc;
MPI_Status status;
MPI_Comm_size(comm, &nproc);
MPI_Comm_rank(comm, &myrank);
/* p = floor(log2(nproc)) */
int n = nproc;
int p = -1;
do {
n /= 2;
++p;
} while (n > 0);
if (nproc != (1 << p)) {
if ( myrank == 0 ) {
printf("This program can be run only for powers of 2\n");
}
MPI_Abort(comm, 1);
}
/* N = number of locally stored amplitudes */
int N = 1 << (L-p);
for(int k=0; k<N; ++k) {
w[k] = 0.;
}
/* NOTE(review): lattice_size is int, l is size_t — assumes lattice_size >= 0 */
for (size_t l = 0; l < lattice_size; ++l) {
int i = lattice_first[l];
int j = lattice_second[l];
if (i < (L-p)) {
if (j < (L-p)) {
/* Both sites are local bits: no communication needed */
int m1 = 1 << i;
int m2 = 1 << j;
int m3 = m1 + m2;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
if ((k & m3) == m1) { // when (bit i == 1, bit j == 0) or (bit i == 0, bit j == 1)
w[k] += 0.5 * v[k^m3] - 0.25 * v[k];
} else if ((k & m3) == m2) {
w[k] += 0.5 * v[k^m3] - 0.25 * v[k];
} else {
w[k] += 0.25 * v[k];
}
}
} else {
/* Site i local, site j in the rank bits: swap blocks with partner rank */
int m = 1 << (j-(L-p));
MPI_Sendrecv(v, N, MPI_DOUBLE,
myrank ^ m, 0,
buffer, N, MPI_DOUBLE,
myrank ^ m, 0,
comm, &status);
int m1 = 1 << i;
if ((myrank & m) == m) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
if ((k & m1) == m1) {
w[k] += 0.25 * v[k];
} else {
w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
}
}
} else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
if ((k & m1) == m1) {
w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
} else {
w[k] += 0.25 * v[k];
}
}
}
}
} else {
if (j < (L-p)) {
/* Site j local, site i in the rank bits (mirror of the case above) */
int m = 1 << (i-(L-p));
MPI_Sendrecv(v, N, MPI_DOUBLE,
myrank ^ m, 0,
buffer, N, MPI_DOUBLE,
myrank ^ m, 0,
comm, &status);
int m1 = 1 << j;
if ((myrank & m) == m) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
if ((k & m1) == m1) {
w[k] += 0.25 * v[k];
} else {
w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
}
}
} else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
if ((k & m1) == m1) {
w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
} else {
w[k] += 0.25 * v[k];
}
}
}
} else {
/* Both sites in the rank bits: exchange only when the two bits differ */
int m = (1 << (i-(L-p))) + (1 << (j-(L-p)));
if (((myrank & m) != m) && ((myrank & m) != 0)) {
MPI_Sendrecv(v, N, MPI_DOUBLE,
myrank ^ m, 0,
buffer, N, MPI_DOUBLE,
myrank ^ m, 0,
comm, &status);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
w[k] += 0.5 * buffer[k] - 0.25 * v[k];
}
} else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int k=0; k<N; ++k) {
w[k] += 0.25 * v[k];
}
}
}
}
}
}
#endif // ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
|
openmp-ex16.c | /* This example for computing pi is adapted from Hager & Wellein, Listing 6.2.
*
* We compute $\pi = \int_0^1 \frac{4}{1 + x^2} dx$.
*
* In this example we use two-point Gaussian quadrature rule.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <tictoc.h>
int main(int argc, char **argv)
{
/* M timing repetitions; N integration intervals (overridable via argv[1]) */
int i, j, M = 10, N = 10;
double h, phi[2], relerr, pi = M_PI;
double mintime = 0., maxtime = 0., avgtime = 0.;
TicTocTimer timer;
/* if this program is run with an argument, take the first argument to be N */
if (argc > 1) {
N = atoi(argv[1]);
}
/* h is the width of each interval */
h = 1. / N;
/* compute the quadrature points and weights */
phi[0] = (-sqrt(1./3.) + 1.) / 2.;
phi[1] = ( sqrt(1./3.) + 1.) / 2.;
for (j = 0; j < M; j++) {
double time;
pi = 0.;
timer = tic();
/* Parallel quadrature: each interval contributes two Gauss points */
#pragma omp parallel for reduction(+:pi)
for (i = 0; i < N; i++) {
int k;
for (k = 0; k < 2; k++) {
double x = h * (i + phi[k]);
/* let's pretend this is a lengthier calculation */
usleep(1);
pi += h * 0.5 * 4. / (1. + x*x);
}
}
time = toc(&timer);
/* j == 0 acts as a warm-up run and is excluded from the statistics */
if (j == 1) {
mintime = maxtime = avgtime = time;
} else if (j > 1) {
mintime = time < mintime ? time : mintime;
maxtime = time > maxtime ? time : maxtime;
avgtime += time;
}
}
/* Average over the M-1 timed repetitions */
avgtime /= (M - 1);
relerr = fabs(M_PI - pi) / M_PI;
printf("Computed pi %g, relative error %g\n", pi, relerr);
printf("Calculation time %g [%g, %g]\n", avgtime, mintime, maxtime);
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y, normalizing so result->tv_usec lands in
 * [0, 1000000).  *y is used as scratch space and may be modified.
 *
 * Returns 1 when the difference is negative (x earlier than y), else 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds when x has fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec of the result is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
vla-5.c | // { dg-do compile }
/* Compile-only regression test: a variable-length array (VLA) must be
   accepted in an OpenMP lastprivate clause. */
void foo(int n, int i)
{
int A[n]; /* VLA: size known only at run time */
#pragma omp parallel sections lastprivate(A)
{
A[i] = 1; /* sole section; A from the last section is copied out */
}
}
|
GB_extractTuples.c | //------------------------------------------------------------------------------
// GB_extractTuples: extract all the tuples from a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Extracts all tuples from a matrix, like [I,J,X] = find (A). If any
// parameter I, J and/or X is NULL, then that component is not extracted. The
// size of the I, J, and X arrays (those that are not NULL) is given by nvals,
// which must be at least as large as GrB_nvals (&nvals, A). The values in the
// matrix are typecasted to the type of X, as needed.
// This function does the work for the user-callable GrB_*_extractTuples
// functions.
#include "GB.h"
#define GB_FREE_ALL \
{ \
GB_FREE (Ap) ; \
GB_FREE (X_bitmap) ; \
}
// Extract all (i, j, x) tuples from matrix A, typecasting values to
// xcode.  Any of I_out/J_out/X may be NULL to skip that component.
// On input *p_nvals is the capacity of the output arrays; on success it
// is set to the number of tuples written.
GrB_Info GB_extractTuples // extract all tuples from a matrix
(
GrB_Index *I_out, // array for returning row indices of tuples
GrB_Index *J_out, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *p_nvals, // I,J,X size on input; # tuples on output
const GB_Type_code xcode, // type of array X
const GrB_Matrix A, // matrix to extract tuples from
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GB_void *GB_RESTRICT X_bitmap = NULL ;
int64_t *GB_RESTRICT Ap = NULL ;
ASSERT_MATRIX_OK (A, "A to extract", GB0) ;
ASSERT (p_nvals != NULL) ;
// delete any lingering zombies and assemble any pending tuples;
// allow A to remain jumbled
GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ;
GB_BURBLE_DENSE (A, "(A %s) ") ;
ASSERT (xcode <= GB_UDT_code) ;
const GB_Type_code acode = A->type->code ;
// xcode and A must be compatible
if (!GB_code_compatible (xcode, acode))
{
return (GrB_DOMAIN_MISMATCH) ;
}
const int64_t anz = GB_NNZ (A) ;
if (anz == 0)
{
// no work to do
(*p_nvals) = 0 ;
return (GrB_SUCCESS) ;
}
int64_t nvals = *p_nvals ; // size of I,J,X on input
// capacity is only checked when the caller actually wants output
if (nvals < anz && (I_out != NULL || J_out != NULL || X != NULL))
{
// output arrays are not big enough
return (GrB_INSUFFICIENT_SPACE) ;
}
const size_t asize = A->type->size ;
//-------------------------------------------------------------------------
// determine the number of threads to use
//-------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + A->nvec, chunk, nthreads_max) ;
//-------------------------------------------------------------------------
// handle the CSR/CSC format
//--------------------------------------------------------------------------
// For a CSR matrix the internal "row" dimension is the user's column
// dimension, so swap the destinations of the two index arrays.
GrB_Index *I, *J ;
if (A->is_csc)
{
I = I_out ;
J = J_out ;
}
else
{
I = J_out ;
J = I_out ;
}
//--------------------------------------------------------------------------
// bitmap case
//--------------------------------------------------------------------------
if (GB_IS_BITMAP (A))
{
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
bool need_typecast = (X != NULL) && (xcode != acode) ;
if (need_typecast)
{
// X must be typecasted
int64_t anzmax = GB_IMAX (anz, 1) ;
X_bitmap = GB_MALLOC (anzmax * asize, GB_void) ;
}
Ap = GB_MALLOC (A->vdim+1, int64_t) ;
if (Ap == NULL || (need_typecast && X_bitmap == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// extract the tuples
//----------------------------------------------------------------------
// TODO: pass xcode to GB_convert_bitmap_worker and let it do the
// typecasting. This works for now, however.
GB_OK (GB_convert_bitmap_worker (Ap, I, J,
need_typecast ? X_bitmap : X, NULL, A, Context)) ;
//----------------------------------------------------------------------
// typecast the result if needed
//----------------------------------------------------------------------
if (need_typecast)
{
// typecast the values from X_bitmap into X
GB_cast_array ((GB_void *) X, xcode, X_bitmap,
acode, NULL, asize, anz, nthreads) ;
}
}
else
{
//----------------------------------------------------------------------
// sparse, hypersparse, or full case
//----------------------------------------------------------------------
// No workspace is allocated on this path (Ap and X_bitmap stay NULL),
// so the early returns below need no cleanup.
//----------------------------------------------------------------------
// extract the row indices
//----------------------------------------------------------------------
if (I != NULL)
{
if (A->i == NULL)
{
// A is full; construct the row indices
int64_t avlen = A->vlen ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
I [p] = (p % avlen) ;
}
}
else
{
GB_memcpy (I, A->i, anz * sizeof (int64_t), nthreads) ;
}
}
//----------------------------------------------------------------------
// extract the column indices
//----------------------------------------------------------------------
if (J != NULL)
{
if (!GB_extract_vector_list ((int64_t *) J, A, nthreads))
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
}
//----------------------------------------------------------------------
// extract the values
//----------------------------------------------------------------------
if (X != NULL)
{
// typecast or copy the values from A into X
GB_cast_array ((GB_void *) X, xcode, (GB_void *) A->x,
acode, NULL, asize, anz, nthreads) ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
*p_nvals = anz ; // number of tuples extracted
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
GB_binop__plus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int32)
// A*D function (colscale): GB (_AxD__plus_int32)
// D*A function (rowscale): GB (_DxB__plus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int32)
// C=scalar+B GB (_bind1st__plus_int32)
// C=scalar+B' GB (_bind1st_tran__plus_int32)
// C=A+scalar GB (_bind2nd__plus_int32)
// C=A'+scalar GB (_bind2nd_tran__plus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the int32 plus semantics
// come from this file's GB_* macros expanded inside the shared template.
void GB (_Cdense_ewise3_accum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
void GB (_Cdense_ewise3_noaccum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// Returns GrB_NO_VALUE when this specialization is compiled out.
GrB_Info GB (_Cdense_accumB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__plus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M / !M).  For eWiseUnion the
// alpha/beta scalars substitute for entries missing from A or B.
GrB_Info GB (_AaddB__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// scalars are only read in the eWiseUnion case
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, C sparse/hyper.
GrB_Info GB (_AemultB_08__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for plus (commutative), so flipxy needs no handling.
GrB_Info GB (_AemultB_02__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__plus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B (bind the scalar as
// the first operand); Bb is B's bitmap (NULL means all entries present).
GrB_Info GB (_bind1st__plus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (bind the scalar as
// the second operand); Ab is A's bitmap (NULL means all entries present).
GrB_Info GB (_bind2nd__plus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = x + A': transpose A and apply the operator with the scalar bound
// as the first operand, via GB_CAST_OP inside GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__plus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = A' + y: transpose A and apply the operator with the scalar bound
// as the second operand, via GB_CAST_OP inside GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mat_mul.c | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include <inttypes.h>
static int plop = 0;
int var_globale[1024];
/*
 * Naive O(n^3) dense matrix multiply: C = A * B, with the row loop
 * distributed over OpenMP threads.  Each thread logs a monotonic
 * timestamp (in ns) on entering and leaving the parallel region.
 *
 * A, B: n x n inputs (unchanged); C: n x n output, fully overwritten.
 */
void mat_mul(double**A, double**B, double**C, int n) {
    var_globale[0]++;   /* global call counter */
#pragma omp parallel
    {
        int cur_thread = omp_get_thread_num();
        struct timespec t;
        clock_gettime(CLOCK_MONOTONIC_RAW, &t);
        /* Integer arithmetic: the previous double expression
           (t.tv_sec*1e9 + t.tv_nsec) rounds away nanosecond precision
           once tv_sec*1e9 exceeds 2^53, i.e. on any modern epoch. */
        uint64_t cur_time = (uint64_t)t.tv_sec * 1000000000ull + (uint64_t)t.tv_nsec;
        printf("[%d] cur_time=%"PRIu64" \n", cur_thread, cur_time);
#pragma omp for
        for(int i=0; i<n; i++) {
            for(int j=0; j<n; j++) {
                C[i][j] = 0;
                for(int k=0; k<n; k++) {
                    C[i][j] += A[i][k] * B[k][j];
                }
            }
        }
        clock_gettime(CLOCK_MONOTONIC_RAW, &t);
        cur_time = (uint64_t)t.tv_sec * 1000000000ull + (uint64_t)t.tv_nsec;
        printf("[%d] cur_time=%"PRIu64" \n", cur_thread, cur_time);
    }
    /* note: the old file-scope-shadowing `int i, j, k;` locals were
       unused (the loops declare their own) and have been removed */
}
/* Allocate a size x size matrix as an array of row pointers.
   The caller releases it with free_mat().  Allocation failures are not
   checked, matching the rest of this toy benchmark. */
double** alloc_matrix(int size) {
    double **rows = malloc(sizeof(double*) * size);
    for (int r = 0; r < size; r++) {
        rows[r] = malloc(sizeof(double) * size);
    }
    return rows;
}
/* Release an n-row matrix allocated row-by-row (see alloc_matrix):
   each row first, then the row-pointer array itself. */
void free_mat(double**mat, int n) {
    for (int r = 0; r < n; r++)
        free(mat[r]);
    free(mat);
}
/* Fill an n x n matrix with the deterministic pattern (row+col) mod 10,
   so runs are reproducible without seeding a RNG. */
void init_mat(double**A, int n) {
    for (int r = 0; r < n; r++) {
        for (int c = 0; c < n; c++) {
            A[r][c] = (r + c) % 10;
        }
    }
}
/* Debug helper: dump the matrix to stdout.  The body is compiled out
   (#if 0) so timing runs are not polluted by I/O; flip to #if 1 to
   re-enable. */
void print_mat(double** C, int n) {
#if 0
int i, j;
for(i=0; i<n; i++) {
for(j=0; j<n; j++) {
printf("%lf ", C[i][j]);
}
printf("\n");
}
#endif
}
/*
 * Benchmark driver: build two n x n matrices, multiply them with
 * mat_mul, and report the wall-clock time.
 *
 * argv[1] (optional): matrix size n (default 100; non-positive or
 * unparsable values fall back to the default).
 */
int main(int argc, char** argv) {
    int n = 100;
    if (argc > 1) {
        n = atoi(argv[1]);
        if (n <= 0) {
            /* atoi returns 0 on garbage; keep the run meaningful */
            n = 100;
        }
    }
    printf("Matrix size: %d\n", n);

    /* Use the existing helper rather than the previous copy of its
       allocation loops inlined here under "#if 1". */
    double **A = alloc_matrix(n);
    double **B = alloc_matrix(n);
    double **C = alloc_matrix(n);

    init_mat(A, n);
    init_mat(B, n);

    printf("Start computing\n");
    struct timespec t1, t2;
    clock_gettime(CLOCK_REALTIME, &t1);
    mat_mul(A, B, C, n);
    clock_gettime(CLOCK_REALTIME, &t2);
    double duration = ((t2.tv_sec-t1.tv_sec)*1e9+(t2.tv_nsec-t1.tv_nsec))/1e9;
    printf("Computation took %lf s\n", duration);

    print_mat(C, n);   /* no-op unless print_mat's body is re-enabled */
    free_mat(A, n);
    free_mat(B, n);
    free_mat(C, n);
    return 0;
}
|
GB_unaryop__lnot_int16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_uint32
// op(A') function: GB_tran__lnot_int16_uint32
// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !((int16_t) Ax [p] != 0) for all p, in parallel
// (cast-then-lnot, via the GB_CAST_OP macro defined above).
GrB_Info GB_unop__lnot_int16_uint32
(
int16_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast uint32 -> int16, and apply
// the logical-not operator, all inside the shared transpose template.
GrB_Info GB_tran__lnot_int16_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Inpaint_simple_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Inpaint_simple_core.h"
#include "utils.h"
/* C-OMP implementation of simple inpainting shemes
* inpainting using averaged interface values
*
* Input Parameters:
* 1. Image/volume to inpaint
* 2. Mask of the same size as (1) in 'unsigned char' format (ones mark the region to inpaint, zeros belong to the data)
* 3. Iterations number
* 3. sigma - controlling parameter to start inpainting
*
* Output:
* [1] Inpainted image/volume
*/
/* Simple inpainting (CPU): copy Input to Output, rescale the masked
   region by sigma relative to its maximum value, then repeatedly smooth
   with a windowed mean until `iterations` sweeps are done.  Works in 2D
   (dimZ == 1) or 3D.  Returns 0. */
int Inpaint_simple_CPU_main(float *Input, unsigned char *Mask, float *Output, unsigned char *M_upd, int iterations, int W_halfsize, float sigma, int dimX, int dimY, int dimZ)
{
long i, j, k, i1, j1, k1, l, countmask;
float *minmax_array;
/* copy into output */
copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
/* copying M to Mask_upd */
copyIm_unchar(Mask, M_upd, dimX, dimY, dimZ);
/* minmax_array[1] receives the max input value under the mask; used by
   scaling_func as the normalization constant */
minmax_array = (float*) calloc (2,sizeof(float));
max_val_mask(Input, M_upd, minmax_array, (long)(dimX), (long)(dimY), (long)(dimZ));
/* count masked voxels; an empty mask means there is nothing to do */
countmask = 0;
for (k=0; k<dimY*dimX*dimZ; k++) if (M_upd[k] == 1) countmask++;
if (countmask == 0) printf("%s \n", "Nothing to inpaint, zero mask!");
else {
if (dimZ == 1) {
/* 2D path: rescale masked pixels, then run `iterations` smoothing
   sweeps.  NOTE(review): mean_inp_2D reads and writes Output in
   place, so parallel sweeps are order-dependent -- presumably
   acceptable for this smoothing; confirm if bitwise determinism
   is required. */
#pragma omp parallel for shared(Input,M_upd) private(i,j)
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
scaling_func(Input, M_upd, Output, sigma, minmax_array, i, j, 0l, (long)(dimX), (long)(dimY), 0l); /* scaling function */
}}
for (l=0; l<iterations; l++) {
#pragma omp parallel for shared(Input,M_upd) private(i1,j1)
for(i1=0; i1<dimX; i1++) {
for(j1=0; j1<dimY; j1++) {
mean_inp_2D(Input, M_upd, Output, sigma, W_halfsize, i1, j1, (long)(dimX), (long)(dimY)); /* smoothing of the mask */
}}
}
}
else {
/* 3D version */
#pragma omp parallel for shared(Input,M_upd) private(i,j,k)
for(k=0; k<dimZ; k++) {
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
scaling_func(Input, M_upd, Output, sigma, minmax_array, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ)); /* scaling function */
}}}
for (l=0; l<iterations; l++) {
#pragma omp parallel for shared(Input,M_upd) private(i1,j1,k1)
for(k1=0; k1<dimZ; k1++) {
for(i1=0; i1<dimX; i1++) {
for(j1=0; j1<dimY; j1++) {
mean_inp_3D(Input, M_upd, Output, sigma, W_halfsize, i1, j1, k1, (long)(dimX), (long)(dimY), (long)(dimZ)); /* smoothing of the mask */
}}}
}
}
}
free(minmax_array);
return 0;
}
/********************************************************************/
/**************************COMMON function***************************/
/********************************************************************/
/* Rescale a single voxel: when the mask flags position (i,j,k), replace
   the output value by sigma * Input/max, where max (minmax_array[1]) is
   the largest input value found under the mask.  Unmasked voxels are
   left untouched.  dimZ is unused; kept for a uniform call signature. */
void scaling_func(float *Input, unsigned char *M_upd, float *Output, float sigma, float *minmax_array, long i, long j, long k, long dimX, long dimY, long dimZ)
{
    long idx = (dimX * dimY) * k + dimX * j + i;
    if (M_upd[idx] == 1) {
        Output[idx] = sigma * (Input[idx] / minmax_array[1]);
    }
    return;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/*mean smoothing of the inapainted values inside and in the viscinity of the mask */
/* Windowed mean smoothing around pixel (i,j): when at least one pixel in
   the (2W+1)^2 window is flagged in M_upd, Output[i,j] is replaced by
   the mean of Output over the window.  Out-of-range window coordinates
   fall back to the centre coordinate, so edge pixels re-count the centre
   row/column.  Input and sigma are unused here; kept for a uniform call
   signature across the inpainting helpers. */
void mean_inp_2D(float *Input, unsigned char *M_upd, float *Output, float sigma, int W_halfsize, long i, long j, long dimX, long dimY)
{
    long centre = j * dimX + i;
    float acc = 0.0f;
    long taps = 0;
    int touches_mask = 0;

    for (long dx = -W_halfsize; dx <= W_halfsize; dx++) {
        long x = i + dx;
        if ((x < 0) || (x >= dimX)) x = i;
        for (long dy = -W_halfsize; dy <= W_halfsize; dy++) {
            long y = j + dy;
            if ((y < 0) || (y >= dimY)) y = j;
            long at = y * dimX + x;
            if (M_upd[at] == 1) touches_mask = 1;
            acc += Output[at];
            taps++;
        }
    }
    if (touches_mask == 1) Output[centre] = acc / taps;
    return;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* mean smoothing of the inpainted values inside and in the vicinity of the mask */
/* Mean-smooth one voxel of Output over a (2*W_halfsize+1)^3 cube.
 * Voxel (i,j,k) is replaced by the cube average of Output ONLY when at
 * least one voxel inside the cube is flagged in the mask M_upd; otherwise
 * it is left unchanged. Taps that fall outside the volume are redirected
 * to the centre coordinate along that axis (same clamping as the
 * original), so they re-sample interior voxels rather than being dropped.
 * Input and sigma are accepted for call-site uniformity but are not read. */
void mean_inp_3D(float *Input, unsigned char *M_upd, float *Output, float sigma, int W_halfsize, long i, long j, long k, long dimX, long dimY, long dimZ)
{
    (void)Input; (void)sigma; /* unused in this smoothing pass */

    const long slice = dimX * dimY;          /* stride of one XY slice    */
    const long idx = slice * k + j * dimX + i; /* centre voxel flat index */
    float acc = 0.0f;                        /* running sum of samples    */
    long taps = 0;                           /* number of samples         */
    int mask_hit = 0;                        /* cube touched the mask?    */

    for (long dk = -W_halfsize; dk <= W_halfsize; dk++)
    {
        long kk = k + dk;
        if (kk < 0 || kk >= dimZ) kk = k;   /* clamp back to centre slice */
        for (long di = -W_halfsize; di <= W_halfsize; di++)
        {
            long ii = i + di;
            if (ii < 0 || ii >= dimX) ii = i;
            for (long dj = -W_halfsize; dj <= W_halfsize; dj++)
            {
                long jj = j + dj;
                if (jj < 0 || jj >= dimY) jj = j;

                const long nbr = slice * kk + jj * dimX + ii;
                if (M_upd[nbr] == 1) mask_hit = 1;
                acc += Output[nbr];
                taps++;
            }
        }
    }

    if (mask_hit) Output[idx] = acc / taps;
}
|
GB_binop__copysign_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__copysign_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp64)
// C=scalar+B GB (_bind1st__copysign_fp64)
// C=scalar+B' GB (_bind1st_tran__copysign_fp64)
// C=A+scalar GB (_bind2nd__copysign_fp64)
// C=A'+scalar GB (_bind2nd_tran__copysign_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = copysign (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = copysign (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COPYSIGN || GxB_NO_FP64 || GxB_NO_COPYSIGN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: C += A+B (all dense) is only generated for accumulators in the
// list below; copysign is not one of them, so this kernel is compiled out
// and the generic path is used instead.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator, no mask.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE,
// which tells the caller to fall back to the generic implementation.
GrB_Info GB (_Cdense_ewise3_noaccum__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numeric work is done by the shared template, specialized via the GB_*
// macros (GB_BINOP = copysign, double operands) defined at the file top
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// copysign operator as the accumulator. B is pre-sliced into B_ntasks
// tasks (B_ek_slicing) and run on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// subassign method 23: C += B where C is dense and B is sparse
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the copysign
// operator. p_bwork points at the scalar, passed type-erased as GB_void.
GrB_Info GB (_Cdense_accumb__copysign_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
// subassign method 22: C += scalar where C is dense
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): the return below is unreachable (the block above already
// returned); harmless artifact of the code generator — left untouched.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the generator emits no C = A*D (column-scale) kernel for the
// copysign operator, so this stub is compiled out and the generic path runs.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the generator emits no C = D*B (row-scale) kernel for the
// copysign operator, so this stub is compiled out and the generic path runs.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with op = copysign(fp64).
// The C_to_* maps relate vectors of C to those of M, A, and B; TaskList
// carries the parallel task decomposition computed by the add phase 1.
GrB_Info GB (_AaddB__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed below by GB_FREE_WORK
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse, with op = copysign(fp64).
GrB_Info GB (_AemultB_08__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, op = copysign(fp64). Since copysign(x,y) != copysign(y,x),
// GB_BINOP_FLIP is 1 and the flipxy flag selects the argument order.
GrB_Info GB (_AemultB_02__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, op = copysign(fp64). M is pre-sliced into M_ntasks.
GrB_Info GB (_AemultB_04__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult (bitmap case): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where
// the result C is held in bitmap form, op = copysign(fp64).
GrB_Info GB (_AemultB_bitmap__copysign_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx[p] = copysign(x, Bx[p]) for all bnz entries, where x is a
// fixed scalar bound as the FIRST operand. Bb is B's bitmap (may be NULL
// for full matrices; GBB then treats every entry as present).
GrB_Info GB (_bind1st__copysign_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present in the bitmap
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = copysign (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx[p] = copysign(Ax[p], y) for all anz entries, where y is a
// fixed scalar bound as the SECOND operand. Ab is A's bitmap (may be NULL
// for full matrices; GBB then treats every entry as present).
GrB_Info GB (_bind2nd__copysign_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = copysign (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// C = op (x, A'): transpose A and apply copysign with the scalar x bound
// as the first operand; GB_CAST_OP below is consumed by the transpose
// template to produce each cij.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = copysign (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__copysign_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// C = op (A', y): transpose A and apply copysign with the scalar y bound
// as the second operand; GB_CAST_OP below is consumed by the transpose
// template to produce each cij.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = copysign (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.