| source | c |
|---|---|
FullyDistVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.5 -------------------------------------------------*/
/* date: 10/09/2015 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc, Adam Lugowski ------------*/
/****************************************************************/
/*
Copyright (c) 2010-2015, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <iterator>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"
namespace combblas {
template <class IT, class NT>
class FullyDistSpVec;
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class DenseVectorLocalIterator;
template <typename RETT, typename NU1, typename NU2, typename BINOP>
class EWiseExtToPlainAdapter;
// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it cannot be 32 bits in general.
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type >
{
public:
FullyDistVec ( );
FullyDistVec ( IT globallen, NT initval);
FullyDistVec ( std::shared_ptr<CommGrid> grid);
FullyDistVec ( std::shared_ptr<CommGrid> grid, IT globallen, NT initval);
FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs ); // Sparse -> Dense conversion constructor
FullyDistVec ( const DenseParVec<IT,NT> & rhs); //!< DenseParVec->FullyDistVec conversion constructor
FullyDistVec ( const std::vector<NT> & fillarr, std::shared_ptr<CommGrid> grid ); // initialize a FullyDistVec with a std::vector from each processor
template <class ITRHS, class NTRHS>
FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs ); // type converter constructor
class ScalarReadSaveHandler
{
public:
NT getNoNum(IT index) { return static_cast<NT>(1); }
template <typename c, typename t>
NT read(std::basic_istream<c,t>& is, IT index)
{
NT v;
is >> v;
return v;
}
template <typename c, typename t>
void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
{
os << v;
}
};
template <class HANDLER>
std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }
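// Example (a sketch; "input.txt" is a placeholder path):
// std::ifstream infile("input.txt");
// FullyDistVec<int64_t,double> v;
// v.ReadDistribute(infile, 0); // rank 0 (the master) reads and scatters, using ScalarReadSaveHandler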
template <class ITRHS, class NTRHS>
FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs); // assignment with type conversion
FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs); //!< Actual assignment operator
FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs); //!< FullyDistSpVec->FullyDistVec conversion operator
FullyDistVec<IT,NT> & operator=(const DenseParVec<IT,NT> & rhs); //!< DenseParVec->FullyDistVec conversion operator
FullyDistVec<IT,NT> & operator=(NT fixedval) // assign fixed value
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < arr.size(); ++i)
arr[i] = fixedval;
return *this;
}
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //!< subsref
//! like operator=, but instead of making a deep copy it just steals the contents.
//! Useful for places where the "victim" will be destroyed immediately after the call.
FullyDistVec<IT,NT> & stealFrom(FullyDistVec<IT,NT> & victim);
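// e.g. result.stealFrom(tmp); // tmp's contents are taken over; treat tmp as destroyed from here on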
FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
bool operator==(const FullyDistVec<IT,NT> & rhs) const;
void SetElement (IT indx, NT numx); // element-wise assignment
void SetLocalElement(IT index, NT value) { arr[index] = value; }; // no checks, local index
NT GetElement (IT indx) const; // element-wise fetch
NT operator[](IT indx) const // a more C++-like API
{
return GetElement(indx);
}
void Set(const FullyDistSpVec< IT,NT > & rhs);
template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
void GSet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, MPI_Win win);
template <class NT1, typename _BinaryOperationIdx>
FullyDistSpVec<IT,NT> GGet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, NT nullValue);
void iota(IT globalsize, NT first);
void RandPerm(); // randomly permute the vector
FullyDistVec<IT,IT> sort(); // sort and return the permutation
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
IT LocArrSize() const { return arr.size(); } // = MyLocLength() once arr is resized
template <typename _Predicate>
FullyDistSpVec<IT,NT> Find(_Predicate pred) const; //!< Return the elements for which pred is true
FullyDistSpVec<IT,NT> Find(NT val) const; //!< Return the elements where the value val is found
template <typename _Predicate>
FullyDistVec<IT,IT> FindInds(_Predicate pred) const; //!< Return the indices where pred is true
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
template <typename _UnaryOperation>
void Apply(_UnaryOperation __unary_op)
{
std::transform(arr.begin(), arr.end(), arr.begin(), __unary_op);
}
template <typename _BinaryOperation>
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < arr.size(); ++i)
arr[i] = __binary_op(arr[i], i + offset);
}
template <typename _UnaryOperation, typename IRRELEVANT_NT>
void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);
// extended callback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);
// plain fallback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
true);
}
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
applyNulls, nullValue, true);
}
template <typename T1, typename T2>
class retTrue {
public:
bool operator()(const T1& x, const T2& y)
{
return true;
}
};
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
}
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
}
void PrintToFile(std::string prefix)
{
std::ofstream output;
commGrid->OpenDebugFile(prefix, output);
std::copy(arr.begin(), arr.end(), std::ostream_iterator<NT> (output, " "));
output << std::endl;
output.close();
}
void PrintInfo(std::string vectorname) const;
void DebugPrint();
std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
std::pair<IT, NT> MinElement() const; // returns the <index, value> pair of the global minimum
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT identity) const; //! Reduce can be used to implement max_element, for instance
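// Example (a sketch; grid and n are placeholders, and it assumes the maximum<> functor from CombBLAS' Operations.h):
// FullyDistVec<int64_t,double> vec(grid, n, 0.0);
// double mx = vec.Reduce(combblas::maximum<double>(), std::numeric_limits<double>::lowest());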
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;
void SelectCandidates(double nver);
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;
std::vector<NT> data() { return arr; }
const std::vector<NT> data() const { return arr; }
// TODO: cache
IT getTotalLength() {
IT v = arr.size();
IT r;
MPI_Allreduce(&v, &r, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
return r;
}
template <typename _BinaryOperation>
void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);
private:
std::vector< NT > arr;
template <class IU, class NU>
friend class DenseParMat;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class DenseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
template <typename IU, typename NU>
friend FullyDistVec<IU,NU> Concatenate ( std::vector< FullyDistVec<IU,NU> > & vecs);
template <typename IU, typename NU>
friend void Augment (FullyDistVec<int64_t, int64_t>& mateRow2Col, FullyDistVec<int64_t, int64_t>& mateCol2Row,
FullyDistVec<int64_t, int64_t>& parentsRow, FullyDistVec<int64_t, int64_t>& leaves);
template <class IU, class DER>
friend SpParMat<IU, bool, DER> PermMat (const FullyDistVec<IU,IU> & ri, const IU ncol);
friend void maximumMatching(SpParMat < int64_t, bool, SpDCCols<int32_t,bool> > & A, FullyDistVec<int64_t, int64_t>& mateRow2Col,FullyDistVec<int64_t, int64_t>& mateCol2Row);
};
}
#include "FullyDistVec.cpp"
#endif
|
tarjan_scc.h | /*
* tarjan_scc.h
* LLAMA Graph Analytics
*
* Copyright 2014
* The President and Fellows of Harvard College.
*
* Copyright 2014
* Oracle Labs.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LL_GENERATED_CPP_TARJAN_SCC_H
#define LL_GENERATED_CPP_TARJAN_SCC_H
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <limits.h>
#include <cmath>
#include <algorithm>
#include <omp.h>
#include "llama/ll_dfs_template.h"
#include "llama/ll_writable_graph.h"
#include "llama/ll_seq.h"
#include "benchmarks/benchmark.h"
// BFS/DFS definitions for the procedure
template <class Graph>
class Tarjan_dfs : public ll_dfs_template
<Graph, true, true, true, false>
{
public:
Tarjan_dfs(Graph& _G, node_t*& _G_SCC, bool*& _G_InStack,
node_t*& _G_LowLink, ll_node_seq_vec& _Stack, node_t& _n)
: ll_dfs_template<Graph, true, true, true, false>(_G),
G(_G), G_SCC(_G_SCC), G_InStack(_G_InStack), G_LowLink(_G_LowLink),
Stack(_Stack), n(_n){}
private: // list of variables
Graph& G;
node_t*& G_SCC;
bool*& G_InStack;
node_t*& G_LowLink;
ll_node_seq_vec& Stack;
node_t& n;
protected:
virtual void visit_pre(node_t t)
{
G.set_node_prop(G_InStack, t, true);
Stack.push_back(t);
G.set_node_prop(G_LowLink, t, t);
}
virtual void visit_post(node_t t)
{
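// Post-order hook: fold the low-links of t's out-neighbors into t's own
// low-link, so t ends up with the smallest low-link reachable below it.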
ll_edge_iterator iter;
G.out_iter_begin(iter, t);
FOREACH_OUTEDGE_ITER(k_idx, G, iter) {
node_t k = iter.last_node;
if (G_LowLink[k] < G_LowLink[t])
G.set_node_prop(G_LowLink, t, G_LowLink[k]);
}
if (G_LowLink[t] == t)
{
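// t is the root of an SCC: pop the stack down to (and including) t,
// marking every popped node with representative t.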
node_t w;
w = Stack.pop_back() ;
while (w != t)
{
G.set_node_prop(G_InStack, w, false);
G.set_node_prop(G_SCC, w, t);
w = Stack.pop_back() ;
}
G.set_node_prop(G_InStack, w, false);
G.set_node_prop(G_SCC, w, t);
}
}
virtual bool check_navigator(node_t t, edge_t t_idx)
{
return ( !G_InStack[t]);
}
};
/**
* Tarjan's SCC
*/
template <class Graph>
class ll_b_tarjan_scc : public ll_benchmark<Graph> {
node_t* G_SCC;
public:
/**
* Create the benchmark
*/
ll_b_tarjan_scc() : ll_benchmark<Graph>("Tarjan's SCC") {
this->create_auto_array_for_nodes(G_SCC);
}
/**
* Destroy the benchmark
*/
virtual ~ll_b_tarjan_scc(void) {
}
/**
* Run the benchmark
*
* @return the numerical result, if applicable
*/
virtual double run(void) {
//Initializations
Graph& G = *this->_graph;
ll_memory_helper m;
ll_node_seq_vec Stack(omp_get_max_threads());
bool* G_InStack = m.allocate<bool>(G.max_nodes());
node_t* G_LowLink = m.allocate<node_t>(G.max_nodes());
#pragma omp parallel for
for (node_t t0 = 0; t0 < G.max_nodes(); t0 ++)
{
G.set_node_prop(G_SCC, t0, LL_NIL_NODE);
G.set_node_prop(G_InStack, t0, false);
}
for (node_t n = 0; n < G.max_nodes(); n ++)
{
if (G_SCC[n] == LL_NIL_NODE)
{
Tarjan_dfs<Graph> _DFS(G, G_SCC, G_InStack, G_LowLink,
Stack, n);
_DFS.prepare(n);
_DFS.do_dfs();
}
}
return 0;
}
/**
* Finalize the benchmark
*
* @return the updated numerical result, if applicable
*/
virtual double finalize(void) {
node_t max = 0;
for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
if (G_SCC[n] > max) max = G_SCC[n];
}
return max;
}
/**
* Print the results
*
* @param f the output file
*/
virtual void print_results(FILE* f) {
print_results_part(f, this->_graph, G_SCC);
}
};
#endif
|
kmean_par.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "immintrin.h"
#include "pack.h"
#include "kernel.h"
#include <string.h>
#include "rdtsc.h"
//#define DEBUG 1
//#define KERNEL 1
#define KERNEL_P_SIZE 8
#define KERNEL_D_SIZE 4
#define KERNEL_C_SIZE 4
int runs = 1000;
int psize; // number of points
int dsize; // number of dimensions
int k; // number of centers
int iterations;
int t_num;
int main(int argc, char *argv[])
{
if (argc != 6)
{
printf("Wrong arguments! Usage: %s iterations k dsize psize t_num\n", argv[0]);
return 1;
}
iterations = atoi(argv[1]);
k = atoi(argv[2]);
dsize = atoi(argv[3]);
psize = atoi(argv[4]);
t_num = atoi(argv[5]);
double *points;
double *centers;
double *packed_points;
double *packed_centers;
double *new_centers;
int *new_centers_size;
//double *final_labels;
posix_memalign((void **)&points, 64, dsize * psize * sizeof(double));
posix_memalign((void **)¢ers, 64, k * dsize * sizeof(double));
posix_memalign((void **)&packed_points, 64, dsize * psize * sizeof(double));
posix_memalign((void **)&packed_centers, 64, k * dsize * sizeof(double));
posix_memalign((void **)&new_centers, 64, k * dsize * sizeof(double));
posix_memalign((void **)&new_centers_size, 64, k * sizeof(int));
//posix_memalign((void **)&final_labels, 64, psize * sizeof(double));
// initialize points and centers
read_points_and_centers(points, centers, dsize, psize, k);
// pack the data into panel order
pack_points(points, packed_points, dsize, psize);
memcpy(new_centers, centers, k * dsize * sizeof(double));
unsigned long long t0, t1;
unsigned long long sum1 = 0;
// kmeans algorithm
for (int r = 0; r < runs; ++r) {
t0 = rdtsc();
for (int i = 0; i < iterations; ++i)
{
// initialize new centers to 0
pack_centers(new_centers, packed_centers, dsize, k);
memset(new_centers, 0, k * dsize * sizeof(double));
memset(new_centers_size, 0, k * sizeof(int));
int p_loop = psize / KERNEL_P_SIZE;
#pragma omp parallel for num_threads(t_num)
for (int p_i = 0; p_i < p_loop; ++p_i)
{
// set the proper initialize value
// 8 registers reserved for intermediate distance result
int p = p_i * KERNEL_P_SIZE;
__m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
double tmp_cluster_arr_g1[4] = {0.0, 0.0, 0.0, 0.0};
double tmp_min_dist_arr_g1[4] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
double tmp_cluster_arr_g2[4] = {0.0, 0.0, 0.0, 0.0};
double tmp_min_dist_arr_g2[4] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
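// The _g1/_g2 pairs each track 4 of this tile's 8 points: the id of the
// closest center found so far (kept as a double so it fits a ymm lane)
// and that point's running minimum distance.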
for (int c = 0; c < k; c += KERNEL_C_SIZE) // 4 centers computed in each kernel
{
// 8 registers to store intermediate distances
ymm0 = _mm256_setzero_pd();
ymm1 = _mm256_setzero_pd();
ymm2 = _mm256_setzero_pd();
ymm3 = _mm256_setzero_pd();
ymm4 = _mm256_setzero_pd();
ymm5 = _mm256_setzero_pd();
ymm6 = _mm256_setzero_pd();
ymm7 = _mm256_setzero_pd();
for (int d = 0; d < dsize; d += KERNEL_D_SIZE) // 4 dimension in each kernel
{
// apply the kernel on 8 * 4 points matrix and 4 * 4 on centers matrix
kernel(KERNEL_D_SIZE, KERNEL_P_SIZE, &ymm0, &ymm1, &ymm2, &ymm3, &ymm4, &ymm5, &ymm6, &ymm7,
packed_points + p * dsize + d * KERNEL_P_SIZE, packed_centers + c * dsize + d * KERNEL_C_SIZE);
}
findCluster(&ymm0, &ymm1, &ymm2, &ymm3, &ymm4, &ymm5, &ymm6, &ymm7,
tmp_cluster_arr_g1, tmp_min_dist_arr_g1, tmp_cluster_arr_g2, tmp_min_dist_arr_g2, c * 1.0);
}
// update the cluster
#pragma omp critical
{
updateCluster(tmp_cluster_arr_g1, tmp_cluster_arr_g2, points + p * dsize, new_centers, psize, dsize, new_centers_size);
}
}
// Compute the new centers
computeNewCluster(new_centers, new_centers_size, dsize, k);
}
t1 = rdtsc();
sum1 += (t1 - t0);
}
// print new centers
for (int m = 0; m < dsize * k; m++) {
printf("%lf ", new_centers[m]);
if (m % dsize == dsize - 1) {
printf("\n");
}
}
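// FLOP model for the report below: ~3 flops per (point, center, dimension)
// triple in the distance kernel (sub, mul, add), ~2 per (point, center)
// pair for the min reduction, and ~1 per (point, dimension) for the
// center update, i.e. psize*k*dsize*3 + 2*psize*k + psize*dsize per iteration.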
printf("Execution cycles: %lld, times: %lf seconds, FLOPS/cycle: %lf\n",
sum1, (double) sum1 / (double) PROCESSOR_FREQ,
(double) (iterations * (psize * k * dsize * 3 + 2 * psize * k + psize * dsize)) / ((double) (sum1 / runs)));
}
|
kernel_tti_so4.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r34)[y_size + 1][z_size + 1];
posix_memalign((void **)&r34, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r35)[y_size + 1][z_size + 1];
posix_memalign((void **)&r35, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
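/* (Denormals appear as the wave field decays toward zero; flushing them
avoids the slow microcoded paths they would otherwise trigger, at a
negligible accuracy cost for this stencil.) */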
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
for (int time = time_m, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= time_M; time += 1, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3))
{
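/* u and v hold three rotating time planes: t0 indexes the current step,
t2 the previous one, and t1 the plane being overwritten with the new
step, so the second-order time update needs no extra storage. */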
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
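/* bf0 is invoked four times to tile the domain with fixed-size blocks:
the full blocks in x and y, the y-remainder strip, the x-remainder
strip, and the corner remainder. */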
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, t0, x0_blk0_size, x_M - (x_M - x_m + 2) % (x0_blk0_size), x_m - 1, y0_blk0_size, y_M - (y_M - y_m + 2) % (y0_blk0_size), y_m - 1, z_M, z_m, nthreads);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, t0, x0_blk0_size, x_M - (x_M - x_m + 2) % (x0_blk0_size), x_m - 1, (y_M - y_m + 2) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 2) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, t0, (x_M - x_m + 2) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 2) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 2) % (y0_blk0_size), y_m - 1, z_M, z_m, nthreads);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, t0, (x_M - x_m + 2) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 2) % (x0_blk0_size) + 1, (y_M - y_m + 2) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 2) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
/*==============================================*/
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x1_blk0_size, x_M - (x_M - x_m + 1) % (x1_blk0_size), x_m, y1_blk0_size, y_M - (y_M - y_m + 1) % (y1_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x1_blk0_size, x_M - (x_M - x_m + 1) % (x1_blk0_size), x_m, (y_M - y_m + 1) % (y1_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y1_blk0_size) + 1, z_M, z_m, sp_zi_m, nthreads);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, (x_M - x_m + 1) % (x1_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x1_blk0_size) + 1, y1_blk0_size, y_M - (y_M - y_m + 1) % (y1_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, (x_M - x_m + 1) % (x1_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x1_blk0_size) + 1, (y_M - y_m + 1) % (y1_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y1_blk0_size) + 1, z_M, z_m, sp_zi_m, nthreads);
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
/*
for (int time = time_m, t1 = (time + 1) % (3); time <= time_M; time += 1, t1 = (time + 1) % (3))
{
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
Begin section2
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F / 3.0F) * (x_M - x_m + 1) / nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic, chunk_size)
for (int x = x_m; x <= x_M; x += 1)
{
for (int y = y_m; y <= y_M; y += 1)
{
}
}
}
End section2
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec - start_section2.tv_sec) + (double)(end_section2.tv_usec - start_section2.tv_usec) / 1000000;
}
*/
free(r21);
free(r20);
free(r19);
free(r18);
free(r17);
free(r34);
free(r35);
return 0;
}
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
{
for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -v[t0][x + 4][y + 4][z + 4];
r35[x + 1][y + 1][z + 1] = 1.0e-1F * (-(r39 + v[t0][x + 4][y + 4][z + 5]) * r18[x + 1][y + 1][z + 1] - (r39 + v[t0][x + 4][y + 5][z + 4]) * r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] - (r39 + v[t0][x + 5][y + 4][z + 4]) * r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1]);
float r40 = -u[t0][x + 4][y + 4][z + 4];
r34[x + 1][y + 1][z + 1] = 1.0e-1F * (-(r40 + u[t0][x + 4][y + 4][z + 5]) * r18[x + 1][y + 1][z + 1] - (r40 + u[t0][x + 4][y + 5][z + 4]) * r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] - (r40 + u[t0][x + 5][y + 4][z + 4]) * r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1]);
}
}
}
}
}
}
}
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
if (x1_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x1_blk0 = x_m; x1_blk0 <= x_M; x1_blk0 += x1_blk0_size)
{
for (int y1_blk0 = y_m; y1_blk0 <= y_M; y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= x1_blk0 + x1_blk0_size - 1; x += 1)
{
for (int y = y1_blk0; y <= y1_blk0 + y1_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x + 1][y + 1][z] * r35[x + 1][y + 1][z] - r18[x + 1][y + 1][z + 1] * r35[x + 1][y + 1][z + 1] + r19[x + 1][y][z + 1] * r20[x + 1][y][z + 1] * r35[x + 1][y][z + 1] - r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] * r35[x + 1][y + 1][z + 1] + r20[x][y + 1][z + 1] * r21[x][y + 1][z + 1] * r35[x][y + 1][z + 1] - r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1] * r35[x + 1][y + 1][z + 1];
float r43 = pow(vp[x + 4][y + 4][z + 4], -2);
float r42 = 1.0e-1F * (-r18[x + 1][y + 1][z] * r34[x + 1][y + 1][z] + r18[x + 1][y + 1][z + 1] * r34[x + 1][y + 1][z + 1] - r19[x + 1][y][z + 1] * r20[x + 1][y][z + 1] * r34[x + 1][y][z + 1] + r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] * r34[x + 1][y + 1][z + 1] - r20[x][y + 1][z + 1] * r21[x][y + 1][z + 1] * r34[x][y + 1][z + 1] + r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1] * r34[x + 1][y + 1][z + 1]) - 8.33333315e-4F * (u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 1.3333333e-2F * (u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 7.49999983e-2F * u[t0][x + 4][y + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x + 1][y + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x + 4][y + 4][z + 4] + u[t2][x + 4][y + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x + 4][y + 4][z + 4] + v[t2][x + 4][y + 4][z + 4]);
u[t1][x + 4][y + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x + 4][y + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x + 1][y + 1][z + 1] + r46 * (damp[x + 1][y + 1][z + 1] * u[t0][x + 4][y + 4][z + 4]));
v[t1][x + 4][y + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x + 1][y + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x + 1][y + 1][z + 1] * v[t0][x + 4][y + 4][z + 4]));
}
int sp_zi_M = nnz_sp_source_mask[x][y] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x][y][sp_zi];
float r22 = save_src_u[time][source_id[x][y][zind]] * source_mask[x][y][zind];
#pragma omp atomic update
u[t1][x + 4][y + 4][zind + 4] += r22;
float r23 = save_src_v[time][source_id[x][y][zind]] * source_mask[x][y][zind];
#pragma omp atomic update
v[t1][x + 4][y + 4][zind + 4] += r23;
}
}
}
}
}
}
}
/* Backdoor edit at Wed Sep 9 19:03:00 2020*/
|
mixed_tentusscher_myo_epi_2004_S2_13.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_13.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
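// Each gate obeys dx/dt = (x_inf - x)/tau; integrating exactly over one
// step gives x(t+dt) = x_inf - (x_inf - x)*exp(-dt/tau), the
// Rush-Larsen-style update applied below.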
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
sItot = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
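// Rapid-buffering update for Ca: total (free + buffer-bound) Ca is conserved
// over the step, so the new free concentration is the positive root of
// x^2 + b*x - c = 0, i.e. x = (sqrt(b*b + 4*c) - b)/2, with b and c built
// from the buffer capacity (Bufsr/Bufc) and its Kd (Kbufsr/Kbufc) below.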
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
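// Exponential (Rush-Larsen) gate update: each gate obeys ds/dt = (s_inf - s)/tau,
// whose exact solution over a step dt is s_inf - (s_inf - s)*exp(-dt/tau).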
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
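/* Usage sketch (illustrative, not part of the original model): advance one
cell with the explicit step computed above. Assumes sv[] holds the 17 state
variables in the order read at the top of RHS_cpu_epi; `num_steps` and the
stimulus callback `stim` are hypothetical.

    real rDY[17];
    for(int step = 0; step < num_steps; step++) {
        RHS_cpu_epi(sv, rDY, stim(step*dt), dt);
        for(int i = 0; i < 17; i++)
            sv[i] = rDY[i];
    }
*/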
|
Diffusion_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Diffusion_core.h"
#include "utils.h"
#define EPS 1.0e-5
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
int signNDFc(float x) {
return (x > 0) - (x < 0);
}
/* C-OMP implementation of linear and nonlinear diffusion with the regularisation model [1,2] (2D/3D case)
* The minimisation is performed using an explicit scheme.
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularization parameter
* 3. Edge-preserving parameter (sigma); when sigma equals zero, nonlinear diffusion reduces to linear diffusion
* 4. Number of iterations (>= 150 recommended for the explicit scheme)
* 5. tau - time-marching step for explicit scheme
* 6. Penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight
* 7. epsilon - tolerance constant
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Perona, P. and Malik, J., 1990. Scale-space and edge detection using anisotropic diffusion. IEEE Transactions on pattern analysis and machine intelligence, 12(7), pp.629-639.
* [2] Black, M.J., Sapiro, G., Marimont, D.H. and Heeger, D., 1998. Robust anisotropic diffusion. IEEE Transactions on image processing, 7(3), pp.421-432.
*/
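/* Edge-stopping updates applied to each finite difference t = (neighbour - centre) below:
 * Huber:          t -> sign(t)                  if |t| >  sigma, else t/sigma
 * Perona-Malik:   t -> t/(1 + (t/sigma)^2)
 * Tukey biweight: t -> t*(1 - (t/sigma)^2)^2    if |t| <= sigma, else 0
 */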
float Diffusion_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int penaltytype, float epsil, int dimX, int dimY, int dimZ)
{
int i;
float sigmaPar2, *Output_prev=NULL;
sigmaPar2 = sigmaPar/sqrt(2.0f);
long j, DimTotal;
float re, re1;
re = 0.0f; re1 = 0.0f;
int count = 0;
DimTotal = (long)(dimX*dimY*dimZ);
if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
/* copy into output */
copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
for(i=0; i < iterationsNumb; i++) {
if ((epsil != 0.0f) && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
if (dimZ == 1) {
/* running 2D diffusion iterations */
if (sigmaPar == 0.0f) LinearDiff2D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY)); /* linear diffusion (heat equation) */
else NonLinearDiff2D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY)); /* nonlinear diffusion */
}
else {
/* running 3D diffusion iterations */
if (sigmaPar == 0.0f) LinearDiff3D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
else NonLinearDiff3D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY), (long)(dimZ));
}
/* check early stopping criteria if epsilon not equal zero */
if ((epsil != 0.0f) && (i % 5 == 0)) {
re = 0.0f; re1 = 0.0f;
for(j=0; j<DimTotal; j++)
{
re += powf(Output[j] - Output_prev[j],2);
re1 += powf(Output[j],2);
}
re = sqrtf(re)/sqrtf(re1);
/* stop after the relative residual has been below the tolerance epsil on more than three checks */
if (re < epsil) count++;
if (count > 3) break;
}
}
free(Output_prev);
/*adding info into info_vector */
infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
return 0;
}
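/* Usage sketch (illustrative): denoise a 2D image with 300 Perona-Malik
iterations; the parameter values are arbitrary examples and `noisy` is
assumed to hold nx*ny floats.

    float info[2];
    float *denoised = calloc((size_t)(nx*ny), sizeof(float));
    Diffusion_CPU_main(noisy, denoised, info, 0.025f, 0.015f, 300,
                       0.0025f, 2, 1e-06f, nx, ny, 1);
    printf("stopped at iteration %d, residual %f\n", (int)info[0], info[1]);
*/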
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
float LinearDiff2D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY)
{
long i,j,i1,i2,j1,j2,index;
float e,w,n,s,e1,w1,n1,s1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1)
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions (Neumann) */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions (Neumann) */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
index = j*dimX+i;
e = Output[j*dimX+i1];
w = Output[j*dimX+i2];
n = Output[j1*dimX+i];
s = Output[j2*dimX+i];
e1 = e - Output[index];
w1 = w - Output[index];
n1 = n - Output[index];
s1 = s - Output[index];
Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1) - (Output[index] - Input[index]));
}}
return *Output;
}
/* nonlinear diffusion */
float NonLinearDiff2D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY)
{
long i,j,i1,i2,j1,j2,index;
float e,w,n,s,e1,w1,n1,s1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1)
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions (Neumann) */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions (Neumann) */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
index = j*dimX+i;
e = Output[j*dimX+i1];
w = Output[j*dimX+i2];
n = Output[j1*dimX+i];
s = Output[j2*dimX+i];
e1 = e - Output[index];
w1 = w - Output[index];
n1 = n - Output[index];
s1 = s - Output[index];
if (penaltytype == 1){
/* Huber penalty */
if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
else e1 = e1/sigmaPar;
if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
else w1 = w1/sigmaPar;
if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
else n1 = n1/sigmaPar;
if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
else s1 = s1/sigmaPar;
}
else if (penaltytype == 2) {
/* Perona-Malik */
e1 = (e1)/(1.0f + powf((e1/sigmaPar),2));
w1 = (w1)/(1.0f + powf((w1/sigmaPar),2));
n1 = (n1)/(1.0f + powf((n1/sigmaPar),2));
s1 = (s1)/(1.0f + powf((s1/sigmaPar),2));
}
else if (penaltytype == 3) {
/* Tukey Biweight */
if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
else e1 = 0.0f;
if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
else w1 = 0.0f;
if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
else n1 = 0.0f;
if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
else s1 = 0.0f;
}
else {
printf("%s \n", "No penalty function selected! Use 1,2 or 3.");
break;
}
Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1) - (Output[index] - Input[index]));
}}
return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
float LinearDiff3D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY, long dimZ)
{
long i,j,k,i1,i2,j1,j2,k1,k2,index;
float e,w,n,s,u,d,e1,w1,n1,s1,u1,d1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1,k,k1,k2,u1,d1,u,d)
for(k=0; k<dimZ; k++) {
k1 = k+1; if (k1 == dimZ) k1 = k-1;
k2 = k-1; if (k2 < 0) k2 = k+1;
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions (Neumann) */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions (Neumann) */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
index = (dimX*dimY)*k + j*dimX+i;
e = Output[(dimX*dimY)*k + j*dimX+i1];
w = Output[(dimX*dimY)*k + j*dimX+i2];
n = Output[(dimX*dimY)*k + j1*dimX+i];
s = Output[(dimX*dimY)*k + j2*dimX+i];
u = Output[(dimX*dimY)*k1 + j*dimX+i];
d = Output[(dimX*dimY)*k2 + j*dimX+i];
e1 = e - Output[index];
w1 = w - Output[index];
n1 = n - Output[index];
s1 = s - Output[index];
u1 = u - Output[index];
d1 = d - Output[index];
Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1 + u1 + d1) - (Output[index] - Input[index]));
}}}
return *Output;
}
float NonLinearDiff3D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY, long dimZ)
{
long i,j,k,i1,i2,j1,j2,k1,k2,index;
float e,w,n,s,u,d,e1,w1,n1,s1,u1,d1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1,k,k1,k2,u1,d1,u,d)
for(k=0; k<dimZ; k++) {
k1 = k+1; if (k1 == dimZ) k1 = k-1;
k2 = k-1; if (k2 < 0) k2 = k+1;
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions (Neumann) */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions (Neumann) */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
index = (dimX*dimY)*k + j*dimX+i;
e = Output[(dimX*dimY)*k + j*dimX+i1];
w = Output[(dimX*dimY)*k + j*dimX+i2];
n = Output[(dimX*dimY)*k + j1*dimX+i];
s = Output[(dimX*dimY)*k + j2*dimX+i];
u = Output[(dimX*dimY)*k1 + j*dimX+i];
d = Output[(dimX*dimY)*k2 + j*dimX+i];
e1 = e - Output[index];
w1 = w - Output[index];
n1 = n - Output[index];
s1 = s - Output[index];
u1 = u - Output[index];
d1 = d - Output[index];
if (penaltytype == 1){
/* Huber penalty */
if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
else e1 = e1/sigmaPar;
if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
else w1 = w1/sigmaPar;
if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
else n1 = n1/sigmaPar;
if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
else s1 = s1/sigmaPar;
if (fabs(u1) > sigmaPar) u1 = signNDFc(u1);
else u1 = u1/sigmaPar;
if (fabs(d1) > sigmaPar) d1 = signNDFc(d1);
else d1 = d1/sigmaPar;
}
else if (penaltytype == 2) {
/* Perona-Malik */
e1 = (e1)/(1.0f + powf((e1/sigmaPar),2));
w1 = (w1)/(1.0f + powf((w1/sigmaPar),2));
n1 = (n1)/(1.0f + powf((n1/sigmaPar),2));
s1 = (s1)/(1.0f + powf((s1/sigmaPar),2));
u1 = (u1)/(1.0f + powf((u1/sigmaPar),2));
d1 = (d1)/(1.0f + powf((d1/sigmaPar),2));
}
else if (penaltytype == 3) {
/* Tukey Biweight */
if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
else e1 = 0.0f;
if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
else w1 = 0.0f;
if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
else n1 = 0.0f;
if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
else s1 = 0.0f;
if (fabs(u1) <= sigmaPar) u1 = u1*powf((1.0f - powf((u1/sigmaPar),2)), 2);
else u1 = 0.0f;
if (fabs(d1) <= sigmaPar) d1 = d1*powf((1.0f - powf((d1/sigmaPar),2)), 2);
else d1 = 0.0f;
}
else {
printf("%s \n", "No penalty function selected! Use 1,2 or 3.");
break;
}
Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1 + u1 + d1) - (Output[index] - Input[index]));
}}}
return *Output;
}
|
AdaptiveAndFastCFAR.h | #pragma once
#include <math.h>
#include <omp.h>
#include <opencv2/opencv.hpp>
#include "AbstractCFAR.h" // assumed project header declaring AbstractCFAR, ImageUtilities, sqr(), getParameterValue(), getThreadCount() and calculateBandSize()
using namespace cv;
using namespace std;
class AdaptiveAndFastCFAR : public AbstractCFAR {
public:
AdaptiveAndFastCFAR()
{
}
virtual ~AdaptiveAndFastCFAR()
{
}
virtual AbstractCFAR* clone()
{
return new AdaptiveAndFastCFAR;
}
virtual Mat execute(Mat image, double probabilityOfFalseAlarm, map<string, double>& parameters)
{
const int guardRadius = (int)getParameterValue(parameters, "AAF-CFAR.guardRadius", 5);
const int clutterRadius = (int)getParameterValue(parameters, "AAF-CFAR.clutterRadius", 5);
const double censoringPercentile = getParameterValue(parameters, "AAF-CFAR.censoringPercentile", 99.9);
Mat targetImage(image.rows, image.cols, CV_8UC1, Scalar(0));
const int windowRadius = guardRadius + clutterRadius;
const int startIndex = 1;
Mat histogram = ImageUtilities::createHistogram(image, startIndex);
const double globalTargetThreshold = sqr(ImageUtilities::getPercentileIndex<int>(histogram, censoringPercentile / 100.0));
Mat powerImage = createPowerImage(image);
const int threadCount = min(getThreadCount(), image.rows);
double* powerImageRow;
unsigned char* targetImageRow;
double* powerImageWindowRow;
bool fullWindowUpdate;
double sumI, sumI2, meanI, meanI2, alpha, gamma;
int x, y, xx, yy, xa, ya, sumC;
#pragma omp parallel private(sumI, sumI2, sumC) num_threads(threadCount)
{
#pragma omp for private(x, y, xx, yy, xa, ya, powerImageRow, targetImageRow, powerImageWindowRow, fullWindowUpdate, meanI, meanI2, alpha, gamma)
for (y = 0; y<image.rows; y++) {
powerImageRow = (double*)(powerImage.data + y * powerImage.step);
targetImageRow = (unsigned char*)(targetImage.data + y * targetImage.step);
for (x = 0; x<image.cols; x++) {
if (x == 0) {
fullWindowUpdate = true;
sumI = 0.0;
sumI2 = 0.0;
sumC = 0;
}
if (fullWindowUpdate) {
fullWindowUpdate = false;
for (yy = -windowRadius; yy <= windowRadius; yy++) {
ya = y + yy;
if (ya >= 0 && ya < image.rows) {
powerImageWindowRow = (double*)(powerImage.data + ya * powerImage.step);
for (xx = -windowRadius; xx <= windowRadius; xx++) {
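// on guard rows, skip the guard columns: setting xx = guardRadius makes the loop resume at guardRadius+1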
if (xx == -guardRadius && yy >= -guardRadius && yy <= guardRadius)
xx = guardRadius;
else {
xa = x + xx;
if (xa >= 0 && xa < image.cols && powerImageWindowRow[xa] > 0 && powerImageWindowRow[xa] < globalTargetThreshold) {
sumI += powerImageWindowRow[xa];
sumI2 += sqr(powerImageWindowRow[xa]);
sumC++;
}
}
}
}
}
}
else {
for (yy = -windowRadius; yy <= windowRadius; yy++) {
ya = y + yy;
if (ya >= 0 && ya < image.rows) {
powerImageWindowRow = (double*)(powerImage.data + ya * powerImage.step);
// column leaves the search window: drop from clutter statistics
xa = x - windowRadius - 1;
if (xa >= 0 && xa < image.cols && powerImageWindowRow[xa] > 0 && powerImageWindowRow[xa] < globalTargetThreshold) {
sumI -= powerImageWindowRow[xa];
sumI2 -= sqr(powerImageWindowRow[xa]);
sumC--;
}
// column enters the search window: add to clutter statistics
xa = x + windowRadius;
if (xa >= 0 && xa < image.cols && powerImageWindowRow[xa] > 0 && powerImageWindowRow[xa] < globalTargetThreshold) {
sumI += powerImageWindowRow[xa];
sumI2 += sqr(powerImageWindowRow[xa]);
sumC++;
}
}
}
for (yy = -guardRadius; yy <= guardRadius; yy++) {
ya = y + yy;
if (ya >= 0 && ya < image.rows) {
powerImageWindowRow = (double*)(powerImage.data + ya * powerImage.step);
// column leaves the guard square, re-entering the clutter ring: add back
xa = x - guardRadius - 1;
if (xa >= 0 && xa < image.cols && powerImageWindowRow[xa] > 0 && powerImageWindowRow[xa] < globalTargetThreshold) {
sumI += powerImageWindowRow[xa];
sumI2 += sqr(powerImageWindowRow[xa]);
sumC++;
}
// column enters the guard square, leaving the clutter ring: subtract
xa = x + guardRadius;
if (xa >= 0 && xa < image.cols && powerImageWindowRow[xa] > 0 && powerImageWindowRow[xa] < globalTargetThreshold) {
sumI -= powerImageWindowRow[xa];
sumI2 -= sqr(powerImageWindowRow[xa]);
sumC--;
}
}
}
}
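// Two-parameter threshold: match the first two sample moments of the censored
// clutter (meanI, meanI2) to a heavy-tailed power-law (Pareto-type) model with
// CCDF (1 + T/gamma)^alpha; inverting it at the requested false-alarm rate
// gives the threshold gamma*(pow(Pfa, 1/alpha) - 1) tested below.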
if (sumC > 0) {
meanI = (sumI / sumC);
meanI2 = (sumI2 / sumC);
alpha = -1 - meanI2 / (meanI2 - 2 * sqr(meanI));
gamma = (-alpha - 1) * meanI;
if (powerImageRow[x] > gamma * (pow(probabilityOfFalseAlarm, 1.0 / alpha) - 1)) {
targetImageRow[x] = UCHAR_MAX;
}
}
}
}
}
return targetImage;
}
virtual int getClutterArea(map<string, double>& parameters)
{
const int guardRadius = (int)parameters["AAF-CFAR.guardRadius"];
const int clutterRadius = (int)parameters["AAF-CFAR.clutterRadius"];
const int windowRadius = (guardRadius + clutterRadius);
const int clutterArea = sqr(2 * windowRadius + 1) - sqr(2 * guardRadius + 1);
return clutterArea;
}
virtual int getBandWidth(map<string, double>& parameters)
{
const int guardRadius = (int)parameters["AAF-CFAR.guardRadius"];
const int clutterRadius = (int)parameters["AAF-CFAR.clutterRadius"];
const int windowRadius = (guardRadius + clutterRadius);
return calculateBandSize(windowRadius);
}
virtual bool requiresGlobalHistogram() const { return true; }
private:
Mat createPowerImage(Mat image)
{
Mat powerImage;
image.convertTo(powerImage, CV_64FC1);
for (int y = 0; y < powerImage.rows; y++) {
double* pirow = (double*)(powerImage.data + y * powerImage.step);
for (int x = 0; x < powerImage.cols; x++) {
pirow[x] *= pirow[x];
}
}
return powerImage;
}
};
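/* Usage sketch (illustrative; assumes the surrounding AbstractCFAR framework
   is available and `sarImage` is a single-channel amplitude image):

     AdaptiveAndFastCFAR cfar;
     map<string, double> params;
     params["AAF-CFAR.guardRadius"]   = 5;
     params["AAF-CFAR.clutterRadius"] = 10;
     Mat targets = cfar.execute(sarImage, 1.0e-6, params);  // Pfa = 1e-6
*/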
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_ParCSRMatrix *par_matrix;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Int num_procs;
HYPRE_Int i;
hypre_MPI_Comm_size(comm,&num_procs);
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
if (hypre_IJMatrixGlobalFirstRow(matrix))
{
for (i = 0; i < 2; i++)
{
row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
}
}
else
{
for (i = 0; i < 2; i++)
{
row_starts[i] = row_partitioning[i];
}
}
if (row_partitioning != col_partitioning)
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
if (hypre_IJMatrixGlobalFirstCol(matrix))
{
for (i = 0; i < 2; i++)
{
col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
}
}
else
{
for (i = 0; i < 2; i++)
{
col_starts[i] = col_partitioning[i];
}
}
}
else
{
col_starts = row_starts;
}
par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
hypre_IJMatrixGlobalNumCols(matrix),
row_starts, col_starts, 0, 0, 0);
hypre_IJMatrixObject(matrix) = par_matrix;
return hypre_error_flag;
}
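/* Usage sketch via the public IJ interface (illustrative; once the object
   type is HYPRE_PARCSR these calls reach the ParCSR routines in this file):

     HYPRE_IJMatrix A;
     HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &A);
     HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR);
     HYPRE_IJMatrixInitialize(A);
     HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values);
     HYPRE_IJMatrixAssemble(A);
*/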
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *sizes)
{
HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
}
if (!row_space)
{
row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
for (i = 0; i < local_num_rows; i++)
{
row_space[i] = sizes[i];
}
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
}
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *diag_sizes,
const HYPRE_Int *offd_sizes)
{
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOnProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm,&my_id);
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_off_proc_elmts)
{
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm,&my_id);
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle()));
}
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
HYPRE_MemoryLocation memory_location_aux =
hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
if (!par_matrix)
{
hypre_IJMatrixCreateParCSR(matrix);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
}
HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
HYPRE_Int i;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
if (memory_location_aux == HYPRE_MEMORY_HOST)
{
if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(diag)[i+1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
hypre_CSRMatrixInitialize(diag);
}
if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(offd)[i+1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
hypre_CSRMatrixInitialize(offd);
}
}
if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
}
}
}
else if ( memory_location_aux == HYPRE_MEMORY_HOST )
{
/* AB 4/06 - the assemble routine destroys the aux matrix - so we need
to recreate if initialize is called again
*/
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_BigInt *rows,
HYPRE_Int *ncols)
{
HYPRE_BigInt row_index;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int i, my_id, pstart, index;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_rank(comm,&my_id);
pstart = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < nrows; i++)
{
row_index = rows[i];
if (row_index >= row_partitioning[pstart] &&
row_index < row_partitioning[pstart+1])
{
/* compute local row number */
index = (HYPRE_Int)(row_index - row_partitioning[pstart]);
ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index];
}
else
{
ncols[i] = 0;
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
row_index, my_id);
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
HYPRE_BigInt *rows,
HYPRE_BigInt *cols,
HYPRE_Complex *values)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
hypre_CSRMatrix *diag;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
hypre_CSRMatrix *offd;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_BigInt *col_map_offd;
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_Int i, j, n, ii, indx, pstart;
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, row, col_indx, first;
HYPRE_Int row_local, row_size;
HYPRE_Int warning = 0;
HYPRE_Int *counter;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
if (assemble_flag == 0)
{
hypre_error_in_arg(1);
if (print_level)
{
hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
}
}
col_0 = col_starts[0];
col_n = col_starts[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
}
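/* negative nrows signals "extract entire rows": |nrows| rows are returned,
with ncols[] acting as capacity on input and actual row lengths on output */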
if (nrows < 0)
{
nrows = -nrows;
counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
counter[0] = 0;
for (i=0; i < nrows; i++)
{
counter[i+1] = counter[i]+ncols[i];
}
indx = 0;
for (i=0; i < nrows; i++)
{
row = rows[i];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
row_size = diag_i[row_local+1] - diag_i[row_local] +
offd_i[row_local+1] - offd_i[row_local];
if (counter[i]+row_size > counter[nrows])
{
hypre_error_in_arg(1);
if (print_level)
{
hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
}
}
if (ncols[i] < row_size)
{
warning = 1;
}
for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
{
cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
values[indx++] = diag_data[j];
}
for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
{
cols[indx] = col_map_offd[offd_j[j]];
values[indx++] = offd_data[j];
}
counter[i+1] = indx;
}
else
{
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
}
}
}
if (warning)
{
for (i=0; i < nrows; i++)
{
ncols[i] = counter[i+1] - counter[i];
}
if (print_level)
{
hypre_printf ("Warning! ncols has been changed!\n");
}
}
hypre_TFree(counter, HYPRE_MEMORY_HOST);
}
else
{
indx = 0;
for (ii=0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
for (i=0; i < n; i++)
{
col_indx = cols[indx] - first;
values[indx] = 0.0;
if (col_indx < col_0 || col_indx > col_n)
/* search in offd */
{
for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
{
if (col_map_offd[offd_j[j]] == col_indx)
{
values[indx] = offd_data[j];
break;
}
}
}
else /* search in diag */
{
col_indx = col_indx - col_0;
for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
{
if (diag_j[j] == (HYPRE_Int)col_indx)
{
values[indx] = diag_data[j];
break;
}
}
}
indx++;
}
}
else
{
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
}
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
//HYPRE_Int row_len;
HYPRE_BigInt col_0, col_n, row;
HYPRE_Int i, ii, j, n, not_found;
//HYPRE_Int col_indx, cnt1;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_BigInt first;
HYPRE_Int pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
HYPRE_Int j_offd;
for (ii=0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
size = diag_i[row_local+1] - diag_i[row_local] +
offd_i[row_local+1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1;*/
return hypre_error_flag;
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1; */
return hypre_error_flag;
}
}
indx++;
}
}
}
}
else
{
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii=0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
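/* two storage paths: while need_aux is set, entries accumulate in the
flexible aux_j/aux_data arrays (grown on demand); otherwise exact row
sizes are known and entries go straight into the preallocated diag/offd
CSR arrays below */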
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
HYPRE_Int col_j;
offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
col_j = (HYPRE_Int)(cols[indx]-col_0);
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
HYPRE_Complex value )
{
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(diag);
HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(offd);
HYPRE_Int ii;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < nnz_diag; ii++)
{
diag_data[ii] = value;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < nnz_offd; ii++)
{
offd_data[ii] = value;
}
}
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Complex value )
{
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
{
hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
}
else
#endif
{
hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
}
}
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,
"Matrix not assembled! Required to set constant values!");
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_BigInt col_0, col_n;
HYPRE_Int i, ii, j, n, not_found;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_BigInt first;
HYPRE_Int pstart;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
if (hypre_IJMatrixAssembleFlag(matrix))
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
HYPRE_Int j_offd;
/* AB - 4/06 - need to get this object*/
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
for (ii=0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
size = diag_i[row_local+1] - diag_i[row_local] +
offd_i[row_local+1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
/* return -1; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
}
indx++;
}
}
/* not my row */
else
{
if (!aux_matrix)
{
size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
/* AB - 4/6 - the row should be negative to indicate an add */
/* UMY - 12/28/09 - now positive since we eliminated the feature of
setting on other processors */
/* off_proc_i[off_proc_i_indx++] = row; */
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii=0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1;*/
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees the ParCSR data (matrix object and auxiliary translator) of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix));
hypre_AuxParCSRMatrixDestroy((hypre_AuxParCSRMatrix*)hypre_IJMatrixTranslator(matrix));
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This handles set and add-to values calls for off-processor entries -
* it is called from matrix assemble. It uses the assumed partition to
* locate the actual owner of each off-processor row.
*
*****************************************************************************/
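/* Illustrative sketch (editor's note, not part of the library): off_proc_i
   arrives packed as (row, count) pairs. For example, three queued add-to
   entries

      A(10,3) += 1.0,  A(10,7) += 2.0,  A(42,0) += 5.0

   on a process that owns neither row 10 nor row 42 would be passed in as

      off_proc_i    = {10, 2, 42, 1}       off_proc_i_indx   = 4
      off_proc_j    = {3, 7, 0}
      off_proc_data = {1.0, 2.0, 5.0}      current_num_elmts = 3
*/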
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list=NULL;
HYPRE_Int *row_list_num_elements=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_Int big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
if (memory_location == HYPRE_MEMORY_DEVICE)
{
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST);
hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
for (i = 0; i < current_num_elmts; i++)
{
off_proc_i_h[2*i] = tmp[i];
off_proc_i_h[2*i+1] = 1;
}
off_proc_i_indx = current_num_elmts * 2;
off_proc_i = off_proc_i_h;
off_proc_j = off_proc_j_h;
off_proc_data = off_proc_data_h;
hypre_TFree(tmp, HYPRE_MEMORY_HOST);
}
/* below we either apply each received row immediately via
hypre_IJMatrixAddToValuesParCSR (host path) or collect all received
rows into one chunk for a single device call */
HYPRE_Int off_proc_nelm_recv_cur = 0;
HYPRE_Int off_proc_nelm_recv_max = 0;
HYPRE_BigInt *off_proc_i_recv = NULL;
HYPRE_BigInt *off_proc_j_recv = NULL;
HYPRE_Complex *off_proc_data_recv = NULL;
HYPRE_BigInt *off_proc_i_recv_d = NULL;
HYPRE_BigInt *off_proc_j_recv_d = NULL;
HYPRE_Complex *off_proc_data_recv_d = NULL;
num_rows = off_proc_i_indx/2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i=0; i < num_rows; i++)
{
row = off_proc_i[i*2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i*2+1];
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through and create a contact list - we need to contact the
assumed processors and find out who the actual row owners are - we will
contact with a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i=0; i< num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[i-1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter*2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up the data to be sent - we have
HYPRE_BigInt data and HYPRE_Complex data that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so
we can allocate storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_BigInt) and (HYPRE_Complex) - if these are very different, then we
waste some storage, but presumably not a large amount, since this function
should not be used on really large amounts of data anyway */
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
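/* worked example (illustrative): with an 8-byte HYPRE_BigInt and an 8-byte
   HYPRE_Complex, obj_size_bytes is 8 and every slot is fully used; with a
   4-byte HYPRE_BigInt and 8-byte values, integers still occupy 8-byte slots
   and half of each integer slot is padding */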
/* set up data to be sent to send procs */
/* for each proc, void_contact_buf contains: #rows, then row #,
no. elements, col indices, col data, row #, no. elements, col
indices, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
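/* e.g. (illustrative): a destination proc receiving 2 rows with 3 elements
   in total needs 1 + 2*2 + 2*3 = 11 slots - one for the row count, two per
   row (row #, no. elements) and two per element (col index, value) */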
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indices, col data, row #, no. elements, col indices, col data, etc. */
/* un-sort real_proc_id - we want to access the data arrays in order, so
it is cheaper to do this */
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i=0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row_list[i] - you lose the negative signs that differentiate
add/set values */
row = off_proc_i[i*2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j=0; j< num_elements; j++)
{
tmp_big_int = off_proc_j[counter+j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j=0; j< num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/* estimate initial storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for(i=0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sorts the id array, but the original indices are stored in
argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i=0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
if (memory_location == HYPRE_MEMORY_HOST)
{
hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
}
else
{
HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
if (nelm_new > off_proc_nelm_recv_max)
{
off_proc_nelm_recv_max = nelm_new * 2;
off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
}
HYPRE_Int i;
for (i = 0; i < num_elements; i++)
{
off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
}
hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
off_proc_nelm_recv_cur = nelm_new;
}
indx += (num_elements*2);
}
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d,
off_proc_data_recv_d, "add");
#endif
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
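/* sketch of the bookkeeping below (editor's note): for the k-th accepted
   contact,
      send_proc_obj->id[k]             rank of the contacting processor
      send_proc_obj->vec_starts[k]     first slot of its message in v_elements
      send_proc_obj->vec_starts[k+1]   one past its last slot
   where each slot is object_size bytes */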
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
HYPRE_Int myid;
HYPRE_Int index, count, elength;
HYPRE_Int object_size;
void *index_ptr;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;
object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));
hypre_MPI_Comm_rank(comm, &myid );
/*check to see if we need to allocate more space in send_proc_obj for vec starts
* and id */
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length += 20; /* add space for 20 more contacts */
send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts,HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
if( send_proc_obj->id != NULL)
{
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
}
/*initialize*/
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count]; /* current number of elements */
if( send_proc_obj->id != NULL)
{
send_proc_obj->id[count] = contact_proc;
}
/*do we need more storage for the elements?*/
if (send_proc_obj->element_storage_length < index + contact_size)
{
elength = hypre_max(contact_size, 100);
elength += index;
send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
char, elength*object_size, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = elength;
}
/*populate send_proc_obj*/
index_ptr = (void *) ((char *) send_proc_obj->v_elements + index*object_size);
hypre_TMemcpy(index_ptr, p_recv_contact_buf , char, object_size*contact_size, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
send_proc_obj->vec_starts[count+1] = index + contact_size;
send_proc_obj->length++;
/* output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
HYPRE_Int low, high, m;
low = 0;
high = list_length;
if (value >= list[high] || value < list[low])
{
return -1;
}
else
{
while (low+1 < high)
{
m = (low + high) / 2;
if (value < list[m])
{
high = m;
}
else if (value >= list[m])
{
low = m;
}
}
return low;
}
}
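/* example (illustrative): for list = {0, 4, 8, 12} and list_length = 3
   (a partitioning with list_length+1 boundaries), hypre_FindProc returns
   1 for value 5 (since 4 <= 5 < 8), 0 for value 0, and -1 for value 12 */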
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
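/* outline of the steps below (editor's summary, mirrors the code):
   1. all-reduce to learn whether any rank still holds an aux matrix, and
      create an empty one locally if others do;
   2. all-reduce the off-process entry counts and, if nonzero, ship those
      entries to their owners via hypre_IJMatrixAssembleOffProcValsParCSR;
   3. move aux rows into the diag/offd CSR parts, placing each diagonal
      entry first in its row;
   4. sort and compress the big offd column ids into col_map_offd and
      rewrite offd_j as local indices */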
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j = NULL;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data = NULL;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_BigInt *col_map_offd;
HYPRE_Int *row_length;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_BigInt *big_offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex temp;
HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
//HYPRE_Int new_off_proc_i_indx;
//HYPRE_Int cancel_indx;
//HYPRE_Int col_indx;
//HYPRE_Int current_indx;
//HYPRE_Int current_i;
//HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
* have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if (aux_matrix)
{
aux_flag = 1;
}
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if (aux_flag_global && (!aux_flag))
{
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;
current_i = 0;
current_indx = 0;
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}*/
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
HYPRE_MEMORY_HOST,
off_proc_i, off_proc_j, off_proc_data);
}
}
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
HYPRE_Int *diag_array, *offd_array;
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
i_diag = 0;
i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_BigInt *local_j;
HYPRE_Complex *local_data;
HYPRE_Int rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
i_diag = 0;
i_offd = 0;
for (i=ns; i < ne; i++)
{
local_j = aux_j[i];
local_data = aux_data[i];
diag_pos[i] = -1;
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
i_offd++;
}
else
{
i_diag++;
if ((HYPRE_Int)(local_j[j]-col_0) == i)
{
diag_pos[i] = j;
}
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
for (i=ns; i < ne; i++)
{
diag_i[i] = i_diag;
offd_i[i] = i_offd;
local_j = aux_j[i];
local_data = aux_data[i];
if (diag_pos[i] > -1)
{
diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
big_offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
diag_data[i_diag++] = local_data[j];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixBigJ(offd) = big_offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
}
else
{
/* move diagonal element into first space */
big_offd_j = hypre_CSRMatrixBigJ(offd);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i = 0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j=j0; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
break;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
if (!offd_j && offd_i[num_rows])
{
offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixJ(offd) = offd_j;
}
}
/* generate the nonzero-row lists of diag and offd by calling hypre_CSRMatrixSetRownnz */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
for (i=0; i < nnz_offd; i++)
{
tmp_j[i] = big_offd_j[i];
}
hypre_BigQsort0(tmp_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i=0; i < nnz_offd-1; i++)
{
if (tmp_j[i+1] > tmp_j[i])
{
tmp_j[num_cols_offd++] = tmp_j[i+1];
}
}
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] = tmp_j[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i=0; i < nnz_offd; i++)
{
offd_j[i]=hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
}
if (base)
{
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] -= base;
}
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = NULL;
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly;
* use of this routine requires that the row indices in rows are distinct,
* i.e. rows[i] != rows[j] for i != j, so that no two threads ever write
* to the same row
*
*****************************************************************************/
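/* illustrative call sketch (hypothetical values, 0-based global indices):
   set A(0,0) = 4, A(0,1) = -1 and A(5,5) = 4 in one call:

      HYPRE_Int     nrows          = 2;
      HYPRE_Int     ncols[2]       = {2, 1};
      HYPRE_BigInt  rows[2]        = {0, 5};
      HYPRE_Int     row_indexes[2] = {0, 2};  /* offsets into cols/values */
      HYPRE_BigInt  cols[3]        = {0, 1, 5};
      HYPRE_Complex values[3]      = {4.0, -1.0, 4.0};

   rows must be distinct so that no two threads write to the same row */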
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
//HYPRE_Int cancel_indx;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
//HYPRE_Int *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
//HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
//offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
return hypre_error_flag;
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
//else /*search for previous occurrences and cancel them */
/*{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++; */
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/*}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
//}
//}
}
} /*end parallel region */
}
else /* matrix not assembled */
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_data = hypre_CSRMatrixData(offd);
big_offd_j = hypre_CSRMatrixBigJ(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
/* reset so a stale pointer from an earlier row is not freed twice */
tmp_j = NULL;
tmp_data = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
/*else
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1; */
/*cancel_indx++;*/
//offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/* }
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
/*}
}*/
}
} /* end parallel region */
}
/*if (error_flag)
{
return hypre_error_flag;
}
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
{
cancel_indx += offproc_cnt[i1];
}
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
//hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
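/* threading note (editor's sketch of the code below): entries for rows this
   process does not own cannot be queued safely from inside the parallel
   region, so each thread records (ii, indx) pairs in its own buffer
   offproc_cnt[my_thread_num] = {capacity, next_free, ii0, indx0, ...}
   for sequential off-process handling afterwards */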
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int pstart;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1=0; i1 < max_num_threads; i1++)
offproc_cnt[i1] = NULL;
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* in the threaded version we cannot queue off-process entries directly;
save the row index and data offset here and process them later */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
return hypre_error_flag;
}
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2+1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
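/* A minimal sketch (not part of the original source) of hypre's public IJ
 * interface that ultimately drives the add-to-values routine above; the
 * bounds and entries are hypothetical, and the exact call signatures are
 * assumed from hypre's documented HYPRE_IJMatrix API. Kept inside #if 0 so
 * the translation unit is unchanged. */
#if 0
#include "HYPRE_IJ_mv.h"

void example_add_to_values(MPI_Comm comm,
                           HYPRE_BigInt ilower, HYPRE_BigInt iupper)
{
   HYPRE_IJMatrix A;
   HYPRE_IJMatrixCreate(comm, ilower, iupper, ilower, iupper, &A);
   HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR);
   HYPRE_IJMatrixInitialize(A);

   /* Accumulate one diagonal entry; rows owned by another process are
      buffered (as in the routine above) and exchanged at assembly. */
   HYPRE_Int nrows = 1, ncols = 1;
   HYPRE_BigInt row = ilower, col = ilower;
   HYPRE_Complex value = 1.0;
   HYPRE_IJMatrixAddToValues(A, nrows, &ncols, &row, &col, &value);

   HYPRE_IJMatrixAssemble(A);
   HYPRE_IJMatrixDestroy(A);
}
#endif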
|
pass.c | /* ~~~ Time Series Analysis -- Auxiliary ~~~
*
* Routines for calculating (band, low, high)-pass filters
*
* Author: Jakob Rørsted Mosumgaard
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "arrlib.h"
#include "window.h"
#include "tsfourier.h"
#define PI2micro 6.28318530717958647692528676655900576839433879875e-6
/* Bandpass filter
*
* Arguments:
* - `time` : Array of times. In seconds!
* - `flux` : Array of data.
* - `weight` : Array of statistical weights.
* - `N` : Length of the time series
* - `f1`, `f2` : Frequency interval of filter (f1 < f2)
* - `low`, `high`: Frequency interval to calculate the spectrum
* - `rate` : Frequency sampling
* - `result` : OUTPUT -- Array containing filtered data
* - `useweight` : Flag to signal whether to use weights or not (0 = no weights)
* - `quiet` : Flag. 0 = verbose output. 1 = no output to console
*/
void bandpass(double time[], double flux[], double weight[], size_t N,\
double f1, double f2, double low, double high, double rate,\
double result[], int useweight, int quiet)
{
// Calculate the (sum of the) window function at central frequency
if ( quiet == 0 )
printf(" -- TASK: Calculating window function ... \n");
double fwin = (low + high)/2.0;
double sumwin = windowsum(fwin, low, high, rate, time, weight, N,\
useweight, quiet);
if ( quiet == 0 ) printf(" ... Done!\n");
// Fill sampling vector with cyclic frequencies
size_t M = arr_util_getstep(f1, f2, rate);
double* freq = malloc(M * sizeof(double));
arr_init_linspace(freq, f1, rate, M);
if ( quiet == 0 )
printf(" -- INFO: Number of sampling frequencies = %li\n", M);
// Arrays for data storage
double* power = malloc(M * sizeof(double));
double* alpha = malloc(M * sizeof(double));
double* beta = malloc(M * sizeof(double));
// Subtract the mean to avoid "zero-frequency" problems
double fmean = arr_mean(flux, N);
arr_sca_add(flux, -fmean, N);
// Calculate power spectrum and save alphas and betas
if ( quiet == 0 ) printf(" -- TASK: Calculating power spectrum ... \n");
fourier(time, flux, weight, freq, N, M, power, alpha, beta, useweight);
if ( quiet == 0 ) printf(" ... Done!\n");
// Generate new time series
if ( quiet == 0 ) printf(" -- TASK: Calculating new time series ... \n");
double sumfilt, ny;
#pragma omp parallel default(shared) private(sumfilt, ny)
{
#pragma omp for schedule(static)
for (size_t i = 0; i < N; ++i) {
sumfilt = 0;
for (size_t j = 0; j < M; ++j) {
ny = freq[j] * PI2micro;
sumfilt += alpha[j]*sin(ny*time[i]) + beta[j]*cos(ny*time[i]);
}
result[i] = sumfilt / sumwin;
}
}
if ( quiet == 0 ) printf(" ... Done!\n");
// Add the mean again -- also to the filter
arr_sca_add(flux, fmean, N);
arr_sca_add(result, fmean, N);
// Done!
free(freq);
free(power);
free(alpha);
free(beta);
}
/* Lowpass filter
* --> Basically just a wrapper for the bandpass filter
*
* Arguments:
* - `time` : Array of times. In seconds!
* - `flux` : Array of data.
* - `weight` : Array of statistical weights.
* - `N` : Length of the time series
* - `flow` : Frequency limit of filter
* - `low`, `high`: Frequency interval to calculate the spectrum
* - `rate` : Frequency sampling
* - `result` : OUTPUT -- Array containing filtered data
* - `useweight` : Flag to signal whether to use weights or not (0 = no weights)
* - `quiet` : Flag. 0 = verbose output. 1 = no output to console
*/
void lowpass(double time[], double flux[], double weight[], size_t N,\
double flow, double low, double high, double rate, \
double result[], int useweight, int quiet)
{
// Call bandpass filter from zero to lowpass frequency
double fzero = rate; // Not defined for exactly zero!
bandpass(time, flux, weight, N, fzero, flow, low, high, rate, result,\
useweight, quiet);
}
/* Highpass filter
* --> Basically just a wrapper for the lowpass filter
*
* Arguments:
* - `time` : Array of times. In seconds!
* - `flux` : Array of data.
* - `weight` : Array of statistical weights.
* - `N` : Length of the time series
* - `fhigh` : Frequency limit of filter
* - `low`, `high`: Frequency interval to calculate the spectrum
* - `rate` : Frequency sampling
* - `result` : OUTPUT -- Array containing filtered data
* - `useweight` : Flag to signal whether to use weights or not (0 = no weights)
* - `quiet` : Flag. 0 = verbose output. 1 = no output to console
*/
void highpass(double time[], double flux[], double weight[], size_t N,\
double fhigh, double low, double high, double rate, \
double result[], int useweight, int quiet)
{
// Make temporary array
double* temp = malloc(N * sizeof(double));
// Run lowpass filter
lowpass(time, flux, weight, N, fhigh, low, high, rate, temp, useweight,\
quiet);
// Calculate highpass
for (size_t i = 0; i < N; ++i) {
result[i] = flux[i] - temp[i];
}
// Done!
free(temp);
}
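/* A minimal usage sketch (not part of the original source), assuming
 * `time`, `flux` and `weight` are filled elsewhere; the cutoff and the
 * scan interval/step below are hypothetical values. Frequencies are cyclic
 * micro-frequencies (micro-Hz for time in seconds, per the PI2micro
 * scaling above). Kept inside #if 0 so the translation unit is unchanged. */
#if 0
int example(void)
{
    size_t N = 1000;
    double *time   = malloc(N * sizeof(double));
    double *flux   = malloc(N * sizeof(double));
    double *weight = malloc(N * sizeof(double));
    double *result = malloc(N * sizeof(double));
    /* ... fill time, flux and weight from data ... */

    /* Keep only variability above 100 micro-Hz, scanning the spectrum
       from 1 to 500 micro-Hz in steps of 0.1 micro-Hz */
    highpass(time, flux, weight, N, 100.0, 1.0, 500.0, 0.1,
             result, /* useweight = */ 0, /* quiet = */ 1);

    free(time); free(flux); free(weight); free(result);
    return 0;
}
#endif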
|
ast-dump-openmp-taskloop-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp taskloop simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp taskloop simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp taskloop simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp taskloop simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp taskloop simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:4:1, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:4:1) *const restrict'
// CHECK-NEXT: | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:10:1, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:10:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:17:1, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:17:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:24:1, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:24:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTaskLoopSimdDirective {{.*}} <line:31:1, col:38>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-<<<NULL>>>
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-<<<NULL>>>
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-<<<NULL>>>
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:31:1) *const restrict'
// CHECK-NEXT: |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: `-IntegerLiteral {{.*}} <col:20> 'int' 0
|
GB_binop__islt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint8)
// A*D function (colscale): GB (_AxD__islt_uint8)
// D*A function (rowscale): GB (_DxB__islt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint8)
// C=scalar+B GB (_bind1st__islt_uint8)
// C=scalar+B' GB (_bind1st_tran__islt_uint8)
// C=A+scalar GB (_bind2nd__islt_uint8)
// C=A'+scalar GB (_bind2nd_tran__islt_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__islt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pred.c | /*
Poisson Factorization for sparse matrices
Based on alternating proximal gradient iteration or conjugate gradient.
Variables must be initialized from outside the main function ('run_poismf').
Written for C99 standard and OpenMP 2.0 or later.
Reference paper is:
Cortes, David.
"Fast Non-Bayesian Poisson Factorization for Implicit-Feedback Recommendations."
arXiv preprint arXiv:1811.01908 (2018).
BSD 2-Clause License
Copyright (c) 2018-2021, David Cortes
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "poismf.h"
/* Generic helper function that predicts multiple combinations of users and items from already-fit A and B matrices */
void predict_multiple
(
real_t *restrict out,
real_t *restrict A, real_t *restrict B,
sparse_ix *ixA, sparse_ix *ixB,
size_t n, int k,
int nthreads
)
{
#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
long long ix = 0;
#else
size_t ix = 0;
#endif
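    /* A signed loop index is used on Windows and with pre-3.0 OpenMP
       because OpenMP 2.0 (the latest MSVC supports) does not allow
       unsigned loop counters in a parallel for. */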
size_t k_szt = (size_t) k;
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(out, n, A, B, ixA, ixB, k, k_szt)
for (ix = 0; ix < n; ix++) {
out[ix] = cblas_tdot(k, A + (size_t)ixA[ix] * k_szt, 1,
B + (size_t)ixB[ix] * k_szt, 1);
}
}
long double eval_llk
(
real_t *restrict A,
real_t *restrict B,
sparse_ix ixA[],
sparse_ix ixB[],
real_t *restrict X,
size_t nnz, int k,
bool full_llk, bool include_missing,
size_t dimA, size_t dimB,
int nthreads
)
{
#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
long long ix = 0;
#else
size_t ix = 0;
#endif
real_t pred;
size_t k_szt = (size_t)k;
long double llk = 0;
real_t *restrict sumA = NULL;
real_t *restrict sumB = NULL;
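    /* Poisson log-likelihood: each observed entry with prediction
       pred = a_i . b_j contributes  X*log(pred) - pred; the constant term
       -lgamma(X+1) is added only when the full likelihood is requested. */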
if (!include_missing)
{
#pragma omp parallel for schedule(static) num_threads(nthreads) \
reduction(+:llk) private(pred) shared(A, B, ixA, ixB, k, k_szt, X, nnz)
for (ix = 0; ix < nnz; ix++)
{
pred = cblas_tdot(k, A + (size_t)ixA[ix] * k_szt, 1,
B + (size_t)ixB[ix] * k_szt, 1);
llk += X[ix] * log(pred) - pred;
}
}
else
{
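        /* When missing entries count as zeros, the -pred terms sum over
           every (i,j) pair; since sum_{i,j} a_i.b_j equals the dot product
           of the componentwise column sums of A and B, that full sum
           reduces to the single cblas_tdot over sumA and sumB below. */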
sumA = (real_t*) malloc(k_szt * sizeof(real_t));
sumB = (real_t*) malloc(k_szt * sizeof(real_t));
if (sumA == NULL || sumB == NULL) {
fprintf(stderr, "Error: out of memory.\n");
llk = NAN;
goto cleanup;
}
sum_by_cols(sumA, A, dimA, k_szt);
sum_by_cols(sumB, B, dimB, k_szt);
#pragma omp parallel for schedule(static) num_threads(nthreads) \
reduction(+:llk) shared(X, nnz, A, B, ixA, ixB, k, k_szt)
for (ix = 0; ix < nnz; ix++)
llk += X[ix] * log(cblas_tdot(k, A + (size_t)ixA[ix] * k_szt, 1,
B + (size_t)ixB[ix] * k_szt, 1));
llk -= cblas_tdot(k, sumA, 1, sumB, 1);
}
if (full_llk)
{
long double llk_const = 0;
#pragma omp parallel for schedule(static) num_threads(nthreads) \
reduction(+:llk_const) shared(X, nnz)
for (ix = 0; ix < nnz; ix++)
llk_const += lgamma(X[ix] + 1.);
llk -= llk_const;
}
cleanup:
free(sumA);
free(sumB);
return llk;
}
int factors_multiple
(
real_t *restrict A, real_t *restrict B,
real_t *restrict Bsum, real_t *restrict Amean,
real_t *restrict Xr, sparse_ix *restrict Xr_indptr, sparse_ix *restrict Xr_indices,
int k, size_t dimA,
real_t l2_reg, real_t w_mult,
real_t step_size, size_t niter, size_t maxupd,
Method method, bool limit_step, bool reuse_mean,
int nthreads
)
{
/* Note: Bsum should already have the l1 regularization added to it */
size_t k_szt = (size_t) k;
real_t cnst_div;
real_t *Bsum_w = NULL;
real_t *Bsum_w_scaled = NULL;
real_t *Bsum_scaled = NULL;
size_t size_buffer = 1;
switch(method) {
case pg: {size_buffer = 1; break;}
case cg: {size_buffer = 5; break;}
case tncg: {size_buffer = 22; break;}
}
size_buffer *= (k_szt * (size_t)nthreads);
real_t *buffer_arr = (real_t*) malloc(sizeof(real_t) * size_buffer);
int *buffer_int = NULL;
real_t *zeros_tncg = NULL;
real_t *inf_tncg = NULL;
bool unused;
int return_val = 0;
if (buffer_arr == NULL) {
throw_oom:
fprintf(stderr, "Error: out of memory.\n");
return_val = 1;
goto cleanup;
}
if (method == pg) {
if (w_mult == 1.) {
Bsum_scaled = (real_t*)malloc(sizeof(real_t) * k_szt);
if (Bsum_scaled == NULL) goto throw_oom;
}
else {
Bsum_w_scaled = (real_t*)malloc(sizeof(real_t) * k_szt * dimA);
if (Bsum_w_scaled == NULL) goto throw_oom;
}
}
if (w_mult != 1.) {
Bsum_w = (real_t*)malloc(sizeof(real_t) * k_szt * dimA);
if (Bsum_w == NULL) goto throw_oom;
adjustment_Bsum(B, Bsum, Bsum_w,
Xr_indices, Xr_indptr,
dimA, k, w_mult, nthreads);
if (method == pg)
dscal_large(dimA*k_szt, -step_size, Bsum_w);
}
if (method == tncg) {
buffer_int = (int*)malloc(sizeof(int) * k_szt * (size_t)nthreads);
zeros_tncg = (real_t*)calloc(k_szt, sizeof(real_t));
inf_tncg = (real_t*)malloc(sizeof(real_t) * k_szt);
if (buffer_int == NULL || zeros_tncg == NULL || inf_tncg == NULL)
goto throw_oom;
for (size_t ix = 0; ix < k_szt; ix++)
#ifdef USE_FLOAT
inf_tncg[ix] = HUGE_VALF;
#else
inf_tncg[ix] = HUGE_VAL;
#endif
}
/* Initialize all values to the mean of old A, or to small values */
if (reuse_mean || method != tncg) {
for (size_t ia = 0; ia < dimA; ia++)
memcpy(A + ia*k_szt, Amean, k_szt*sizeof(real_t));
}
switch(method) {
case pg:
{
for (size_t iter = 0; iter < niter; iter++)
{
if (w_mult == 1.) {
memcpy(Bsum_scaled, Bsum, sizeof(real_t) * k_szt);
cblas_tscal(k, -step_size, Bsum_scaled, 1);
}
else {
memcpy(Bsum_w_scaled, Bsum_w, sizeof(real_t) * k_szt * dimA);
dscal_large(dimA*k_szt, -step_size, Bsum_w_scaled);
}
cnst_div = 1. / (1. + 2. * l2_reg * step_size);
pg_iteration(A, B, Xr, Xr_indptr, Xr_indices,
dimA, k, cnst_div, Bsum_scaled, Bsum_w_scaled,
step_size, w_mult, maxupd, buffer_arr, nthreads);
step_size *= 0.5;
}
break;
}
case cg:
{
cg_iteration(A, B, Xr, Xr_indptr, Xr_indices,
dimA, k_szt, limit_step,
Bsum, l2_reg, w_mult, maxupd * niter,
buffer_arr, Bsum_w, nthreads);
break;
}
case tncg:
{
tncg_iteration(A, B, reuse_mean, Xr, Xr_indptr, Xr_indices,
dimA, k_szt, Bsum, l2_reg, w_mult, maxupd,
buffer_arr, buffer_int,
NULL, &unused,
zeros_tncg, inf_tncg,
Bsum_w, nthreads);
break;
}
}
cleanup:
    free(Bsum_w);
    free(Bsum_w_scaled);
    free(Bsum_scaled);
    free(buffer_arr);
    free(buffer_int);
    free(zeros_tncg);
    free(inf_tncg);
return return_val;
}
int factors_single
(
real_t *restrict out, size_t k,
real_t *restrict Amean, bool reuse_mean,
real_t *restrict X, sparse_ix X_ind[], size_t nnz,
real_t *restrict B, real_t *restrict Bsum,
int maxupd, real_t l2_reg, real_t l1_new, real_t l1_old,
real_t w_mult
)
{
if (nnz == 0) {
memset(out, 0, k*sizeof(real_t));
return 0;
}
/* Note: Bsum should already have the *old* l1 regularization added to it */
int k_int = (int) k;
real_t l1_reg = l1_new - l1_old;
real_t *restrict Bsum_pass = NULL;
real_t *restrict zeros_tncg = (real_t*)calloc(k, sizeof(real_t));
real_t *restrict inf_tncg = (real_t*)malloc(sizeof(real_t) * k);
int ret_code = 0;
fdata data = { B, NULL, X, X_ind, nnz, l2_reg, w_mult, k_int };
real_t fun_val = 0;
int niter = 0;
int nfeval = 0;
if (zeros_tncg == NULL || inf_tncg == NULL)
goto throw_oom;
if (l1_reg > 0. || w_mult != 1.)
{
Bsum_pass = (real_t*)malloc(sizeof(real_t) * k);
if (Bsum_pass == NULL) {
throw_oom:
fprintf(stderr, "Error: out of memory.\n");
ret_code = 1;
goto cleanup;
}
if (w_mult != 1.) {
memset(Bsum_pass, 0, sizeof(real_t) * k);
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(k_int, 1., B + X_ind[ix]*k, 1, Bsum_pass, 1);
cblas_tscal(k_int, w_mult - 1., Bsum_pass, 1);
cblas_taxpy(k_int, 1., Bsum, 1, Bsum_pass, 1);
}
else {
memcpy(Bsum_pass, Bsum, sizeof(real_t) * k);
}
if (l1_reg > 0.) {
for (size_t ix = 0; ix < k; ix++)
Bsum_pass[ix] += l1_reg;
}
}
else {
Bsum_pass = Bsum;
}
data.Bsum = Bsum_pass;
for (size_t ix = 0; ix < k; ix++)
#ifdef USE_FLOAT
inf_tncg[ix] = HUGE_VALF;
#else
inf_tncg[ix] = HUGE_VAL;
#endif
/* Initialize to the mean of current factors, or to small numbers */
if (reuse_mean) {
memcpy(out, Amean, k*sizeof(real_t));
} else {
for (size_t ix = 0; ix < k; ix++)
out[ix] = 1e-3;
}
ret_code = tnc(
k_int, out, &fun_val,
(real_t*)NULL,
calc_fun_and_grad,
(void*) &data, zeros_tncg, inf_tncg, (real_t*)NULL,
(real_t*)NULL, 0, -1, maxupd,
0.25, 10., 0., 0.,
1e-4, -1., -1., 1.3,
&nfeval, &niter,
(real_t*)NULL, (int*)NULL);
if (ret_code == -3) {
goto throw_oom;
} else {
ret_code = 0;
}
cleanup:
if (l1_reg > 0. || w_mult != 1.)
free(Bsum_pass);
free(zeros_tncg);
free(inf_tncg);
return ret_code;
}
|
ssha512_fmt_plug.c | /*
* ssha512 support for LDAP style password storage
*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha2);
#else
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#include "common.h"
#include "sha2.h"
#include "base64.h"
#include "simd-intrinsics.h"
#include <string.h>
#include "rawSHA512_common.h"
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "SSHA512"
#define FORMAT_NAME "LDAP"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define PLAINTEXT_LENGTH (111-NSLDAP_SALT_LEN)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
struct s_salt
{
unsigned int len;
union {
unsigned char c[NSLDAP_SALT_LEN];
uint32_t w32;
} data;
};
static struct s_salt *saved_salt;
#ifdef SIMD_COEF_64
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
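/*
 * GETPOS maps (byte i of key `index`) into the interleaved SIMD buffer:
 * keys are packed in groups of SIMD_COEF_64; the 64-bit words of the keys
 * in a group are interleaved word-by-word, and bytes within each word are
 * stored big-endian (offset 7-(i&7)), the order SHA-512 consumes them in.
 * E.g. with SIMD_COEF_64 == 8, byte 9 of key 3 lands at 3*8 + 8*8 + 6 = 94
 * within its group.
 */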
static uint64_t (*saved_key)[SHA_BUF_SIZ*SIMD_COEF_64];
static uint64_t (*crypt_out)[8*SIMD_COEF_64];
static uint64_t **len_ptr64;
static int max_count;
#else
static uint32_t (*crypt_out)[DIGEST_SIZE / 4];
static uint64_t (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static int *saved_len;
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_64
unsigned int i, j;
#endif
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
#ifndef SIMD_COEF_64
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
#else
len_ptr64 = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*len_ptr64), MEM_ALIGN_SIMD);
saved_key = mem_calloc_align(self->params.max_keys_per_crypt /
SIMD_COEF_64,
sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt /
SIMD_COEF_64,
sizeof(*crypt_out), MEM_ALIGN_SIMD);
for (i = 0; i < self->params.max_keys_per_crypt; i += SIMD_COEF_64) {
uint64_t *keybuffer = &((uint64_t *)saved_key)[(i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64)*SHA_BUF_SIZ*SIMD_COEF_64];
for (j = 0; j < SIMD_COEF_64; ++j) {
len_ptr64[i+j] = &keybuffer[15*SIMD_COEF_64];
++keybuffer;
}
}
max_count = self->params.max_keys_per_crypt;
#endif
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
#ifdef SIMD_COEF_64
MEM_FREE(len_ptr64);
#endif
MEM_FREE(saved_len);
}
#ifdef SIMD_COEF_64
static void set_key(char *key, int index) {
#if ARCH_ALLOWS_UNALIGNED
const uint64_t *wkey = (uint64_t*)key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
const uint64_t *wkey = is_aligned(key, sizeof(uint64_t)) ?
(uint64_t*)key : (uint64_t*)strcpy(buf_aligned, key);
#endif
uint64_t *keybuffer = &((uint64_t *)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
uint64_t *keybuf_word = keybuffer;
unsigned int len;
uint64_t temp;
len = 0;
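/* Scan the key one 64-bit word at a time: the first word containing a
   zero byte terminates the key, and JOHNSWAP64 stores each word
   big-endian, the byte order SHA-512 operates on. */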
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP64(temp & 0xff);
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP64(temp & 0xffff);
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP64(temp & 0xffffff);
len+=3;
goto key_cleaning;
}
if (!(temp & 0xff00000000ULL))
{
*keybuf_word = JOHNSWAP64(temp & 0xffffffff);
len+=4;
goto key_cleaning;
}
if (!(temp & 0xff0000000000ULL))
{
*keybuf_word = JOHNSWAP64(temp & 0xffffffffffULL);
len+=5;
goto key_cleaning;
}
if (!(temp & 0xff000000000000ULL))
{
*keybuf_word = JOHNSWAP64(temp & 0xffffffffffffULL);
len+=6;
goto key_cleaning;
}
if (!(temp & 0xff00000000000000ULL))
{
*keybuf_word = JOHNSWAP64(temp & 0xffffffffffffffULL);
len+=7;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP64(temp);
len += 8;
keybuf_word += SIMD_COEF_64;
}
key_cleaning:
saved_len[index] = len;
keybuf_word += SIMD_COEF_64;
while(*keybuf_word && keybuf_word < &keybuffer[15*SIMD_COEF_64]) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_64;
}
}
#else
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
memcpy(saved_key[index], key, len + 1);
}
#endif
static void * get_salt(char * ciphertext)
{
static struct s_salt cursalt;
char *p;
char realcipher[CIPHERTEXT_LENGTH];
int len;
ciphertext += NSLDAP_TAG_LENGTH;
memset(realcipher, 0, sizeof(realcipher));
memset(&cursalt, 0, sizeof(struct s_salt));
len = strlen(ciphertext);
base64_decode(ciphertext, len, realcipher);
// We now support any salt length up to NSLDAP_SALT_SIZE
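// base64 maps 4 chars -> 3 bytes, so (len + 3) / 4 * 3 is the decoded
// length assuming full groups, and each trailing '=' removes one byte.
// E.g. a 64-byte digest plus an 8-byte salt is 72 bytes -> 96 base64
// chars with no padding, giving cursalt.len = 72 - 64 = 8.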
cursalt.len = (len + 3) / 4 * 3 - DIGEST_SIZE;
p = &ciphertext[len];
while (*--p == '=')
cursalt.len--;
memcpy(cursalt.data.c, realcipher+DIGEST_SIZE, cursalt.len);
return &cursalt;
}
#ifdef SIMD_COEF_64
static char *get_key(int index) {
unsigned i;
uint64_t s;
static char out[PLAINTEXT_LENGTH + 1];
unsigned char *wucp = (unsigned char*)saved_key;
s = saved_len[index];
for (i=0;i<(unsigned)s;i++)
out[i] = wucp[ GETPOS(i, index) ];
out[i] = 0;
return (char*) out;
}
#else
static char *get_key(int index) {
return (char*)saved_key[index];
}
#endif
static int cmp_all(void *binary, int count) {
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
if (((uint64_t *) binary)[0] == crypt_out[index/SIMD_COEF_64][index&(SIMD_COEF_64-1)])
#else
if ( ((uint32_t*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
int i;
for (i = 0; i < DIGEST_SIZE/sizeof(uint64_t); i++)
if (((uint64_t *) binary)[i] != crypt_out[index/SIMD_COEF_64][(index&(SIMD_COEF_64-1))+i*SIMD_COEF_64])
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_salt(void *salt) {
saved_salt = salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT) {
#ifndef SIMD_COEF_64
SHA512_CTX ctx;
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_key[index], saved_len[index]);
SHA512_Update(&ctx, saved_salt->data.c, saved_salt->len);
SHA512_Final((unsigned char*)crypt_out[index], &ctx);
#else
// We have to append salt (and re-clean buffer if it is dirty),
// then append final length of password.salt
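// (0x80 is the SHA-512 padding marker; the message length in bits goes
// into 64-bit word 15 of the block, which is where len_ptr64[idx] was
// pointed during init())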
int i, j;
unsigned char *sk = (unsigned char*)saved_key;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
int idx = i+index;
int x = saved_len[idx];
for (j = 0; j < saved_salt->len; ++j)
sk[GETPOS(x+j,idx)] = saved_salt->data.c[j];
x += j;
sk[GETPOS(x,idx)] = 0x80;
++x;
while (sk[GETPOS(x,idx)]) {
sk[GETPOS(x,idx)] = 0;
++x;
}
*(len_ptr64[idx]) = (saved_len[idx]+saved_salt->len)<<3;
}
SIMDSHA512body(&saved_key[index/SIMD_COEF_64], crypt_out[index/SIMD_COEF_64], NULL, SSEi_MIXED_IN);
#endif
}
return count;
}
#ifdef SIMD_COEF_64
static int get_hash_0 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
static int salt_hash(void *salt)
{
struct s_salt * mysalt = salt;
return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_saltedsha2 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
NSLDAP_BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
DIGEST_SIZE,
BINARY_ALIGN,
NSLDAP_SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ NSLDAP_FORMAT_TAG },
sha512_common_tests_ssha512
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
sha512_common_valid_nsldap,
fmt_default_split,
sha512_common_binary_nsldap,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis, and skewness, and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image **images,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the address of a pointer to an Image; the statistics are
% computed for each image in the sequence.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
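/*
  The statistics below are accumulated as raw moment sums S1..S4 over the
  N = area samples and combined in the usual way:

    mean     = S1/N
    sigma    = sqrt(S2/N - mean^2)
    skewness = (S3/N - 3*mean*S2/N + 2*mean^3) / sigma^3
    kurtosis = (S4/N - 4*mean*S3/N + 6*mean^2*S2/N - 3*mean^4) / sigma^4 - 3

  which is exactly what the brightness_* and saturation_* sums implement.
*/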
ModuleExport size_t analyzeImage(Image **images,const int argc,
const char **argv,ExceptionInfo *exception)
{
char
text[MaxTextExtent];
double
area,
brightness,
brightness_mean,
brightness_standard_deviation,
brightness_kurtosis,
brightness_skewness,
brightness_sum_x,
brightness_sum_x2,
brightness_sum_x3,
brightness_sum_x4,
hue,
saturation,
saturation_mean,
saturation_standard_deviation,
saturation_kurtosis,
saturation_skewness,
saturation_sum_x,
saturation_sum_x2,
saturation_sum_x3,
saturation_sum_x4;
Image
*image;
assert(images != (Image **) NULL);
assert(*images != (Image *) NULL);
assert((*images)->signature == MagickSignature);
(void) argc;
(void) argv;
image=(*images);
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
CacheView
*image_view;
ssize_t
y;
MagickBooleanType
status;
brightness_sum_x=0.0;
brightness_sum_x2=0.0;
brightness_sum_x3=0.0;
brightness_sum_x4=0.0;
brightness_mean=0.0;
brightness_standard_deviation=0.0;
brightness_kurtosis=0.0;
brightness_skewness=0.0;
saturation_sum_x=0.0;
saturation_sum_x2=0.0;
saturation_sum_x3=0.0;
saturation_sum_x4=0.0;
saturation_mean=0.0;
saturation_standard_deviation=0.0;
saturation_kurtosis=0.0;
saturation_skewness=0.0;
area=0.0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
&hue,&saturation,&brightness);
brightness*=QuantumRange;
brightness_sum_x+=brightness;
brightness_sum_x2+=brightness*brightness;
brightness_sum_x3+=brightness*brightness*brightness;
brightness_sum_x4+=brightness*brightness*brightness*brightness;
saturation*=QuantumRange;
saturation_sum_x+=saturation;
saturation_sum_x2+=saturation*saturation;
saturation_sum_x3+=saturation*saturation*saturation;
saturation_sum_x4+=saturation*saturation*saturation*saturation;
area++;
p++;
}
}
image_view=DestroyCacheView(image_view);
if (area <= 0.0)
break;
brightness_mean=brightness_sum_x/area;
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
(void) SetImageProperty(image,"filter:brightness:mean",text);
brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
area*brightness_sum_x/area));
(void) FormatLocaleString(text,MaxTextExtent,"%g",
brightness_standard_deviation);
(void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
if (brightness_standard_deviation != 0)
brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
brightness_mean*brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation*
brightness_standard_deviation)-3.0;
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
(void) SetImageProperty(image,"filter:brightness:kurtosis",text);
if (brightness_standard_deviation != 0)
brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation);
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
(void) SetImageProperty(image,"filter:brightness:skewness",text);
saturation_mean=saturation_sum_x/area;
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
(void) SetImageProperty(image,"filter:saturation:mean",text);
saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
area*saturation_sum_x/area));
(void) FormatLocaleString(text,MaxTextExtent,"%g",
saturation_standard_deviation);
(void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
if (saturation_standard_deviation != 0)
saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
saturation_mean*saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation*
saturation_standard_deviation)-3.0;
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
(void) SetImageProperty(image,"filter:saturation:kurtosis",text);
if (saturation_standard_deviation != 0)
saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation);
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
(void) SetImageProperty(image,"filter:saturation:skewness",text);
}
return(MagickImageFilterSignature);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
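/*
 * The tiled loop nest below is equivalent to the plain double-buffered
 * 7-point Jacobi update over the interior points:
 *
 *   A[(t+1)%2][i][j][k] = alpha * A[t%2][i][j][k]
 *                       + beta * ( A[t%2][i-1][j][k] + A[t%2][i+1][j][k]
 *                                + A[t%2][i][j-1][k] + A[t%2][i][j+1][k]
 *                                + A[t%2][i][j][k-1] + A[t%2][i][j][k+1] );
 */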
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 34, Ny = 34, Nz = 34, Nt = 4; /* fallback sizes, used only if the command-line arguments below are missing */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including an extra sentinel element (-1) marking the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(4*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t3+Nx,32),floord(Nt+Nx-4,32)),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),32*t4+30),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing them here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
omp_parallel_sections_reduction.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel sections reduction directive with all its options.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel sections reduction</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_sections_reduction</ompts:testcode:functionname>(FILE * logFile){
int sum=7;
int known_sum;
double dpt=1,dsum=0;
double dknown_sum;
double dt=0.5; /* base of geometric series for + and - test */
double rounding_error= 1.E-5;
int diff;
double ddiff;
int product=1;
int known_product;
int logic_and=1;
int bit_and=1;
int logic_or=0;
int bit_or=0;
int exclusiv_bit_or=0;
int logics[1000];
int i;
int result=0;
/* int my_islarger;*/
/*int is_larger=1;*/
known_sum = (999*1000)/2+7;
#pragma omp parallel sections private(i) <ompts:check>reduction(+:sum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
sum=sum+i;
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
sum=sum+i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
sum=sum+i;
}
}
}
if(known_sum!=sum)
{
result++;
fprintf(logFile,"Error in sum with integers: Result was %d instead of %d.\n",sum, known_sum);
}
diff = (999*1000)/2;
#pragma omp parallel sections private(i) <ompts:check>reduction(-:diff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
diff=diff-i;
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
diff=diff-i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
diff=diff-i;
}
}
}
if(diff != 0)
{
result++;
fprintf(logFile,"Error in Difference with integers: Result was %d instead of 0.\n",diff);
}
for (i=0;i<20;++i)
{
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
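/* closed form of the partial geometric sum: sum_{i=0}^{19} dt^i = (1 - dt^20)/(1 - dt) */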
#pragma omp parallel sections private(i) <ompts:check>reduction(+:dsum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=0;i<6;++i)
{
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i)
{
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i)
{
dsum += pow(dt,i);
}
}
}
if( fabs(dsum-dknown_sum) > rounding_error )
{
result++;
fprintf(logFile,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
}
dpt=1;
for (i=0;i<20;++i)
{
dpt*=dt;
}
fprintf(logFile,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel sections private(i) <ompts:check>reduction(-:ddiff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=0;i<6;++i)
{
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i)
{
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i)
{
ddiff -= pow(dt,i);
}
}
}
if( fabs(ddiff) > rounding_error)
{
result++;
fprintf(logFile,"Error in Difference with doubles: Result was %E instead of 0.0\n",ddiff);
}
known_product = 3628800;
#pragma omp parallel sections private(i) <ompts:check>reduction(*:product)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=1;i<3;i++)
{
product *= i;
}
}
#pragma omp section
{
for(i=3;i<7;i++)
{
product *= i;
}
}
#pragma omp section
{
for(i=7;i<11;i++)
{
product *= i;
}
}
}
if(known_product != product)
{
result++;
fprintf(logFile,"Error in Product with integers: Result was %d instead of %d\n",product,known_product);
}
for(i=0;i<1000;i++)
{
logics[i]=1;
}
#pragma omp parallel sections private(i) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if(!logic_and)
{
result++;
fprintf(logFile,"Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if(logic_and)
{
result++;
fprintf(logFile,"Error in logic AND part 2");
}
for(i=0;i<1000;i++)
{
logics[i]=0;
}
#pragma omp parallel sections private(i) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
logic_or = (logic_or || logics[i]);
}
}
}
if(logic_or)
{
result++;
fprintf(logFile,"Error in logic OR part 1\n");
}
logic_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for (i=1;i<300;i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++)
{
logic_or = (logic_or || logics[i]);
}
}
}
if(!logic_or)
{
result++;
fprintf(logFile,"Error in logic OR part 2\n");
}
for(i=0;i<1000;++i)
{
logics[i]=1;
}
#pragma omp parallel sections private(i) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
bit_and = (bit_and & logics[i]);
}
}
}
if(!bit_and)
{
result++;
fprintf(logFile,"Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501]=0;
#pragma omp parallel sections private(i) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
bit_and = bit_and & logics[i];
}
}
}
if(bit_and)
{
result++;
fprintf(logFile,"Error in BIT AND part 2");
}
for(i=0;i<1000;i++)
{
logics[i]=0;
}
#pragma omp parallel sections private(i) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
bit_or = bit_or | logics[i];
}
}
}
if(bit_or)
{
result++;
fprintf(logFile,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
bit_or = bit_or | logics[i];
}
}
}
if(!bit_or)
{
result++;
fprintf(logFile,"Error in BIT OR part 2\n");
}
for(i=0;i<1000;i++)
{
logics[i]=0;
}
#pragma omp parallel sections private(i) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(exclusiv_bit_or)
{
result++;
fprintf(logFile,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
{
#pragma omp section
{
for(i=0;i<300;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(!exclusiv_bit_or)
{
result++;
fprintf(logFile,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
</ompts:testcode>
</ompts:test>
|
primordial.c | /** @file primordial.c Documented primordial module.
*
* Julien Lesgourgues, 24.08.2010
*
* This module computes the primordial spectra. It can be used in different modes:
* simple parametric form, evolving inflaton perturbations, etc. So far only
* the mode corresponding to a simple analytic form in terms of amplitudes, tilts
* and runnings has been developed.
*
* The following functions can be called from other modules:
*
* -# primordial_init() at the beginning (anytime after perturb_init() and before spectra_init())
* -# primordial_spectrum_at_k() at any time for computing P(k) at any k
* -# primordial_free() at the end
*/
#include "primordial.h"
/**
* Primordial spectra for arbitrary argument and for all initial conditions.
*
* This routine evaluates the primordial spectrum at a given value of k by
* interpolating in the pre-computed table.
*
* When k is not in the pre-computed range but the spectrum can be found
* analytically, it computes it directly. Otherwise it returns an error.
*
* Can be called in two modes, linear or logarithmic:
*
* - linear: takes k, returns P(k)
*
* - logarithmic: takes ln(k), return ln(P(k))
*
* One little subtlety: in case of several correlated initial conditions,
* the cross-correlation spectrum can be negative. Then, in logarithmic mode,
* the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$
* (from -1 to 1) instead of \f$\ln{P_{12}}\f$
*
* This function can be
* called from whatever module at whatever time, provided that
* primordial_init() has been called before, and primordial_free() has not
* been called yet.
*
* @param ppm Input: pointer to primordial structure containing tabulated primordial spectrum
* @param index_md Input: index of mode (scalar, tensor, ...)
* @param mode Input: linear or logarithmic
* @param input Input: wavenumber in 1/Mpc (linear mode) or its logarithm (logarithmic mode)
* @param output Output: for each pair of initial conditions, primordial spectra P(k) in \f$Mpc^3\f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
* @return the error status
*/
int primordial_spectrum_at_k(
struct primordial * ppm,
int index_md,
enum linear_or_logarithmic mode,
double input,
double * output /* array with argument output[index_ic1_ic2] (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int index_ic1,index_ic2,index_ic1_ic2;
double lnk;
int last_index;
/** - infer ln(k) from input. In linear mode, reject negative value of input k value. */
if (mode == linear) {
class_test(input<=0.,
ppm->error_message,
"k = %e",input);
lnk=log(input);
}
else {
lnk = input;
}
/** - if ln(k) is not in the interpolation range, return an error, unless
we are in the case of an analytic spectrum, for which a direct computation is possible */
if ((lnk > ppm->lnk[ppm->lnk_size-1]) || (lnk < ppm->lnk[0])) {
class_test(ppm->primordial_spec_type != analytic_Pk,
ppm->error_message,
"k=%e out of range [%e : %e]",exp(lnk),exp(ppm->lnk[0]),exp(ppm->lnk[ppm->lnk_size-1]));
/* direct computation */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_ic1_ic2,
exp(lnk),
&(output[index_ic1_ic2])),
ppm->error_message,
ppm->error_message);
}
else {
output[index_ic1_ic2] = 0.;
}
}
}
/* if mode==linear, output is already in the correct format. Otherwise, apply necessary transformation. */
if (mode == logarithmic) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
output[index_ic1_ic2] = log(output[index_ic1_ic2]);
}
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
output[index_ic1_ic2] /= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
}
}
}
}
}
/** - otherwise, interpolate in the pre-computed table */
else {
class_call(array_interpolate_spline(
ppm->lnk,
ppm->lnk_size,
ppm->lnpk[index_md],
ppm->ddlnpk[index_md],
ppm->ic_ic_size[index_md],
lnk,
&last_index,
output,
ppm->ic_ic_size[index_md],
ppm->error_message),
ppm->error_message,
ppm->error_message);
/* if mode==logarithmic, output is already in the correct format. Otherwise, apply necessary transformation. */
if (mode == linear) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
output[index_ic1_ic2]=exp(output[index_ic1_ic2]);
}
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
output[index_ic1_ic2] *= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
}
else {
output[index_ic1_ic2] = 0.;
}
}
}
}
}
return _SUCCESS_;
}
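/* A minimal usage sketch, assuming a primordial structure `ppm` already
   filled by primordial_init() and a mode with a single initial condition
   (so that the output array has one entry):

     double pk;
     primordial_spectrum_at_k(ppm, index_md, linear, 0.05, &pk);

   returns P(k) in Mpc^3 at k = 0.05/Mpc; in logarithmic mode one passes
   log(k) instead and reads back ln(P(k)) on the diagonal. */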
/**
* This routine initializes the primordial structure (in particular, it computes table of primordial spectrum values)
*
* @param ppr Input: pointer to precision structure (defines method and precision for all computations)
* @param ppt Input: pointer to perturbation structure (useful for knowing k_min, k_max, etc.)
* @param ppm Output: pointer to initialized primordial structure
* @return the error status
*/
int primordial_init(
struct precision * ppr,
struct perturbs * ppt,
struct primordial * ppm
) {
/** Summary: */
/** - define local variables */
double k,k_min,k_max;
int index_md,index_ic1,index_ic2,index_ic1_ic2,index_k;
double pk,pk1,pk2;
double dlnk,lnpk_pivot,lnpk_minus,lnpk_plus,lnpk_minusminus,lnpk_plusplus;
/* uncomment if you use optional test below
(for correlated isocurvature modes) */
//double cos_delta_k;
/** - check that we really need to compute the primordial spectra */
if (ppt->has_perturbations == _FALSE_) {
ppm->lnk_size=0;
if (ppm->primordial_verbose > 0)
printf("No perturbations requested. Primordial module skipped.\n");
return _SUCCESS_;
}
else {
if (ppm->primordial_verbose > 0)
printf("Computing primordial spectra");
}
/** - get kmin and kmax from perturbation structure. Test that they make sense. */
k_min = ppt->k_min; /* first value, inferred from perturbations structure */
k_max = ppt->k_max; /* last value, inferred from perturbations structure */
class_test(k_min <= 0.,
ppm->error_message,
"k_min negative or null: stop to avoid segmentation fault");
class_test(k_max <= 0.,
ppm->error_message,
"k_max negative or null: stop to avoid segmentation fault");
class_test(ppm->k_pivot <= 0.,
ppm->error_message,
"k_pivot negative or null: stop to avoid segmentation fault");
class_test(ppr->k_per_decade_primordial <= 0.,
ppm->error_message,
"k_per_decade_primordial negative or null: stop to avoid segmentation fault");
class_test(ppr->k_per_decade_primordial <= _K_PER_DECADE_PRIMORDIAL_MIN_,
ppm->error_message,
"k_per_decade_primordial = %e: you ask for such a sparse sampling of the primordial spectrum that this is probably a mistake",
ppr->k_per_decade_primordial);
/** - allocate and fill values of \f$ \ln{k}\f$'s */
class_call(primordial_get_lnk_list(ppm,
k_min,
k_max,
ppr->k_per_decade_primordial
),
ppm->error_message,
ppm->error_message);
/** - define indices and allocate tables in primordial structure */
class_call(primordial_indices(ppt,
ppm),
ppm->error_message,
ppm->error_message);
/** - deal with case of analytic primordial spectra (with amplitudes, tilts, runnings, etc.) */
if (ppm->primordial_spec_type == analytic_Pk) {
if (ppm->primordial_verbose > 0)
printf(" (analytic spectrum)\n");
class_call_except(primordial_analytic_spectrum_init(ppt,
ppm),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
for (index_k = 0; index_k < ppm->lnk_size; index_k++) {
k=exp(ppm->lnk[index_k]);
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_ic1_ic2,
k,
&pk),
ppm->error_message,
ppm->error_message);
if (index_ic1 == index_ic2) {
/* diagonal coefficients: ln[P(k)] */
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = log(pk);
}
else {
/* non-diagonal coefficients: cosDelta(k) = P(k)_12/sqrt[P(k)_1 P(k)_2] */
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]),
k,
&pk1),
ppm->error_message,
ppm->error_message);
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]),
k,
&pk2),
ppm->error_message,
ppm->error_message);
/* either return an error if correlation is too large... */
/*
cos_delta_k = pk/sqrt(pk1*pk2);
class_test_except((cos_delta_k < -1.) || (cos_delta_k > 1.),
ppm->error_message,
primordial_free(ppm),
"correlation angle between IC's takes unphysical values");
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = cos_delta_k;
*/
/* ... or enforce definite positive correlation matrix */
if (pk > sqrt(pk1*pk2))
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 1.;
else if (pk < -sqrt(pk1*pk2))
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = -1.;
else
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = pk/sqrt(pk1*pk2);
}
}
else {
/* non-diagonal coefficients when ic's are uncorrelated */
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 0.;
}
}
}
}
}
}
/** - deal with case of inflation with given \f$V(\phi)\f$ or \f$H(\phi)\f$ */
else if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_H) || (ppm->primordial_spec_type == inflation_V_end)) {
class_call(primordial_inflation_indices(ppm),
ppm->error_message,
ppm->error_message);
if (ppm->primordial_verbose > 0)
printf(" (simulating inflation)\n");
class_call_except(primordial_inflation_solve_inflation(ppt,ppm,ppr),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
}
/** - deal with the case of external calculation of \f$ P_k \f$*/
else if (ppm->primordial_spec_type == external_Pk) {
class_test(ppt->has_scalars == _FALSE_,
ppm->error_message,
"external Pk module cannot work if you do not ask for scalar modes");
class_test(ppt->has_vectors == _TRUE_,
ppm->error_message,
"external Pk module cannot work if you ask for vector modes");
class_test(ppt->has_bi == _TRUE_ || ppt->has_cdi == _TRUE_ || ppt->has_nid == _TRUE_ || ppt->has_niv == _TRUE_,
ppm->error_message,
"external Pk module cannot work if you ask for isocurvature modes (but that could be implemented easily in the future!)");
if (ppm->primordial_verbose > 0)
printf(" (Pk calculated externally)\n");
class_call_except(primordial_external_spectrum_init(ppt,ppm),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
}
else {
class_test(0==0,
ppm->error_message,
"primordial spectrum type not recognized");
}
/** - compute second derivative of each \f$ \ln{P_k} \f$ versus lnk with spline, in view of interpolation */
for (index_md = 0; index_md < ppm->md_size; index_md++) {
class_call(array_spline_table_lines(ppm->lnk,
ppm->lnk_size,
ppm->lnpk[index_md],
ppm->ic_ic_size[index_md],
ppm->ddlnpk[index_md],
_SPLINE_EST_DERIV_,
ppm->error_message),
ppm->error_message,
ppm->error_message);
}
/** - derive spectral parameters from numerically computed spectra
(not used by the rest of the code, but useful to keep in memory for several types of investigation) */
if (ppm->primordial_spec_type != analytic_Pk) {
dlnk = log(10.)/ppr->k_per_decade_primordial;
if (ppt->has_scalars == _TRUE_) {
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot),
&lnpk_pivot),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)+dlnk,
&lnpk_plus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)-dlnk,
&lnpk_minus),
ppm->error_message,
ppm->error_message);
ppm->A_s = exp(lnpk_pivot);
ppm->n_s = (lnpk_plus-lnpk_minus)/(2.*dlnk)+1.;
ppm->alpha_s = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);
/** - expression for alpha_s comes from:
`ns_2 = (lnpk_plus-lnpk_pivot)/(dlnk)+1`
`ns_1 = (lnpk_pivot-lnpk_minus)/(dlnk)+1`
`alpha_s = dns/dlnk = (ns_2-ns_1)/dlnk = (lnpk_plus-lnpk_pivot-lnpk_pivot+lnpk_minus)/(dlnk)/(dlnk)`
**/
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)+2.*dlnk,
&lnpk_plusplus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)-2.*dlnk,
&lnpk_minusminus),
ppm->error_message,
ppm->error_message);
/** - expression for beta_s comes from:
`ppm->beta_s = (alpha_plus-alpha_minus)/dlnk
= ((lnpk_plusplus-2.*lnpk_plus+lnpk_pivot) - (lnpk_pivot-2.*lnpk_minus+lnpk_minusminus))/pow(dlnk,3)`
**/
/* Simplification of the beta_s expression: */
ppm->beta_s = (lnpk_plusplus-2.*lnpk_plus+2.*lnpk_minus-lnpk_minusminus)/pow(dlnk,3);
if (ppm->primordial_verbose > 0)
printf(" -> A_s=%g n_s=%g alpha_s=%g\n",ppm->A_s,ppm->n_s,ppm->alpha_s);
}
if (ppt->has_tensors == _TRUE_) {
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot),
&lnpk_pivot),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot)+dlnk,
&lnpk_plus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot)-dlnk,
&lnpk_minus),
ppm->error_message,
ppm->error_message);
ppm->r = exp(lnpk_pivot)/ppm->A_s;
ppm->n_t = (lnpk_plus-lnpk_minus)/(2.*dlnk);
ppm->alpha_t = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);
if (ppm->primordial_verbose > 0)
printf(" -> r=%g n_t=%g alpha_t=%g\n",ppm->r,ppm->n_t,ppm->alpha_t);
}
}
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by primordial_init().
*
* To be called at the end of each run.
*
* @param ppm Input: pointer to primordial structure (which fields must be freed)
* @return the error status
*/
int primordial_free(
struct primordial * ppm
) {
int index_md;
if (ppm->lnk_size > 0) {
if (ppm->primordial_spec_type == analytic_Pk) {
for (index_md = 0; index_md < ppm->md_size; index_md++) {
free(ppm->amplitude[index_md]);
free(ppm->tilt[index_md]);
free(ppm->running[index_md]);
}
free(ppm->amplitude);
free(ppm->tilt);
free(ppm->running);
}
else if (ppm->primordial_spec_type == external_Pk) {
free(ppm->command);
}
for (index_md = 0; index_md < ppm->md_size; index_md++) {
free(ppm->lnpk[index_md]);
free(ppm->ddlnpk[index_md]);
free(ppm->is_non_zero[index_md]);
}
free(ppm->lnpk);
free(ppm->ddlnpk);
free(ppm->is_non_zero);
free(ppm->ic_size);
free(ppm->ic_ic_size);
free(ppm->lnk);
}
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the primordial structure
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_indices(
struct perturbs * ppt,
struct primordial * ppm
) {
int index_md;
ppm->md_size = ppt->md_size;
class_alloc(ppm->lnpk,ppt->md_size*sizeof(double*),ppm->error_message);
class_alloc(ppm->ddlnpk,ppt->md_size*sizeof(double*),ppm->error_message);
class_alloc(ppm->ic_size,ppt->md_size*sizeof(int*),ppm->error_message);
class_alloc(ppm->ic_ic_size,ppt->md_size*sizeof(int*),ppm->error_message);
class_alloc(ppm->is_non_zero,ppm->md_size*sizeof(short *),ppm->error_message);
for (index_md = 0; index_md < ppt->md_size; index_md++) {
ppm->ic_size[index_md] = ppt->ic_size[index_md];
ppm->ic_ic_size[index_md] = (ppm->ic_size[index_md]*(ppm->ic_size[index_md]+1))/2;
class_alloc(ppm->lnpk[index_md],
ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->ddlnpk[index_md],
ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->is_non_zero[index_md],
ppm->ic_ic_size[index_md]*sizeof(short),
ppm->error_message);
}
return _SUCCESS_;
}
/**
* This routine allocates and fills the list of wavenumbers k
*
*
* @param ppm Input/output: pointer to primordial structure
* @param kmin Input: first value
* @param kmax Input: last value that we should encompass
* @param k_per_decade Input: number of k per decade
* @return the error status
*/
int primordial_get_lnk_list(
struct primordial * ppm,
double kmin,
double kmax,
double k_per_decade
) {
int i;
class_test((kmin <= 0.) || (kmax <= kmin),
ppm->error_message,
"inconsistent values of kmin=%e, kmax=%e",kmin,kmax);
ppm->lnk_size = (int)(log(kmax/kmin)/log(10.)*k_per_decade) + 2;
class_alloc(ppm->lnk,ppm->lnk_size*sizeof(double),ppm->error_message);
for (i=0; i<ppm->lnk_size; i++)
ppm->lnk[i]=log(kmin)+i*log(10.)/k_per_decade;
return _SUCCESS_;
}
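/* Worked example: kmin = 1e-5/Mpc, kmax = 10/Mpc and k_per_decade = 10
   give lnk_size = (int)(6*10) + 2 = 62 points spaced by ln(10)/10; the
   "+2" ensures the last point lands at or beyond ln(kmax). */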
/**
* This routine interprets and stores in a condensed form the input parameters
* in the case of simple analytic spectra with amplitudes, tilts and runnings,
* in such a way that, later on, the spectrum can be obtained by a quick call to
* the routine primordial_analytic_spectrum().
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_analytic_spectrum_init(
struct perturbs * ppt,
struct primordial * ppm
) {
int index_md,index_ic1,index_ic2;
int index_ic1_ic2,index_ic1_ic1,index_ic2_ic2;
double one_amplitude=0.;
double one_tilt=0.;
double one_running=0.;
double one_correlation=0.;
class_alloc(ppm->amplitude,
ppm->md_size*sizeof(double *),
ppm->error_message);
class_alloc(ppm->tilt,
ppm->md_size*sizeof(double *),
ppm->error_message);
class_alloc(ppm->running,
ppm->md_size*sizeof(double *),
ppm->error_message);
for (index_md = 0; index_md < ppm->md_size; index_md++) {
class_alloc(ppm->amplitude[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->tilt[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->running[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
}
for (index_md = 0; index_md < ppm->md_size; index_md++) {
/* diagonal coefficients */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
if (_scalars_) {
if ((ppt->has_ad == _TRUE_) && (index_ic1 == ppt->index_ic_ad)) {
one_amplitude = ppm->A_s;
one_tilt = ppm->n_s;
one_running = ppm->alpha_s;
}
if ((ppt->has_bi == _TRUE_) && (index_ic1 == ppt->index_ic_bi)) {
one_amplitude = ppm->A_s*ppm->f_bi*ppm->f_bi;
one_tilt = ppm->n_bi;
one_running = ppm->alpha_bi;
}
if ((ppt->has_cdi == _TRUE_) && (index_ic1 == ppt->index_ic_cdi)) {
one_amplitude = ppm->A_s*ppm->f_cdi*ppm->f_cdi;
one_tilt = ppm->n_cdi;
one_running = ppm->alpha_cdi;
}
if ((ppt->has_nid == _TRUE_) && (index_ic1 == ppt->index_ic_nid)) {
one_amplitude = ppm->A_s*ppm->f_nid*ppm->f_nid;
one_tilt = ppm->n_nid;
one_running = ppm->alpha_nid;
}
if ((ppt->has_niv == _TRUE_) && (index_ic1 == ppt->index_ic_niv)) {
one_amplitude = ppm->A_s*ppm->f_niv*ppm->f_niv;
one_tilt = ppm->n_niv;
one_running = ppm->alpha_niv;
}
}
if (_tensors_) {
if (index_ic1 == ppt->index_ic_ten) {
one_amplitude = ppm->A_s*ppm->r;
one_tilt = ppm->n_t+1.; /* +1 to match usual definition of n_t (equivalent to n_s-1) */
one_running = ppm->alpha_t;
}
}
class_test(one_amplitude <= 0.,
ppm->error_message,
"inconsistent input for primordial amplitude: %g for index_md=%d, index_ic=%d\n",
one_amplitude,index_md,index_ic1);
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
ppm->amplitude[index_md][index_ic1_ic2] = one_amplitude;
ppm->tilt[index_md][index_ic1_ic2] = one_tilt;
ppm->running[index_md][index_ic1_ic2] = one_running;
}
/* non-diagonal coefficients */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
if (_scalars_) {
if ((ppt->has_ad == _TRUE_) && (ppt->has_bi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_bi)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_bi)))) {
one_correlation = ppm->c_ad_bi;
one_tilt = ppm->n_ad_bi;
one_running = ppm->alpha_ad_bi;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_cdi)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_cdi)))) {
one_correlation = ppm->c_ad_cdi;
one_tilt = ppm->n_ad_cdi;
one_running = ppm->alpha_ad_cdi;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_ad_nid;
one_tilt = ppm->n_ad_nid;
one_running = ppm->alpha_ad_nid;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_ad_niv;
one_tilt = ppm->n_ad_niv;
one_running = ppm->alpha_ad_niv;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_cdi)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_cdi)))) {
one_correlation = ppm->c_bi_cdi;
one_tilt = ppm->n_bi_cdi;
one_running = ppm->alpha_bi_cdi;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_bi_nid;
one_tilt = ppm->n_bi_nid;
one_running = ppm->alpha_bi_nid;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_bi_niv;
one_tilt = ppm->n_bi_niv;
one_running = ppm->alpha_bi_niv;
}
if ((ppt->has_cdi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_cdi_nid;
one_tilt = ppm->n_cdi_nid;
one_running = ppm->alpha_cdi_nid;
}
if ((ppt->has_cdi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_cdi_niv;
one_tilt = ppm->n_cdi_niv;
one_running = ppm->alpha_cdi_niv;
}
if ((ppt->has_nid == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_nid) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_nid) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_nid_niv;
one_tilt = ppm->n_nid_niv;
one_running = ppm->alpha_nid_niv;
}
}
class_test((one_correlation < -1) || (one_correlation > 1),
ppm->error_message,
"inconsistent input for isocurvature cross-correlation\n");
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
index_ic1_ic1 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
index_ic2_ic2 = index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]);
if (one_correlation == 0.) {
ppm->is_non_zero[index_md][index_ic1_ic2] = _FALSE_;
ppm->amplitude[index_md][index_ic1_ic2] = 0.;
ppm->tilt[index_md][index_ic1_ic2] = 0.;
ppm->running[index_md][index_ic1_ic2] = 0.;
}
else {
ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
ppm->amplitude[index_md][index_ic1_ic2] =
sqrt(ppm->amplitude[index_md][index_ic1_ic1]*
ppm->amplitude[index_md][index_ic2_ic2])*
one_correlation;
ppm->tilt[index_md][index_ic1_ic2] =
0.5*(ppm->tilt[index_md][index_ic1_ic1]
+ppm->tilt[index_md][index_ic2_ic2])
+ one_tilt;
ppm->running[index_md][index_ic1_ic2] =
0.5*(ppm->running[index_md][index_ic1_ic1]
+ppm->running[index_md][index_ic2_ic2])
+ one_running;
}
}
}
}
return _SUCCESS_;
}
/**
* This routine returns the primordial spectrum in the simple analytic case with
* amplitudes, tilts, runnings, for each mode (scalar/tensor...),
* pair of initial conditions, and wavenumber.
*
* @param ppm Input/output: pointer to primordial structure
* @param index_md Input: index of mode (scalar, tensor, ...)
* @param index_ic1_ic2 Input: pair of initial conditions (ic1, ic2)
* @param k Input: wavenumber in same units as pivot scale, i.e. in 1/Mpc
* @param pk            Output: primordial power spectrum A (k/k_pivot)^(n-1+...)
* @return the error status
*/
int primordial_analytic_spectrum(
struct primordial * ppm,
int index_md,
int index_ic1_ic2,
double k,
double * pk
) {
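/* the spectrum evaluated below reads
   P(k) = A exp[ (n-1) ln(k/k_pivot) + (alpha/2) ln^2(k/k_pivot) ],
   with amplitude A, tilt n and running alpha read from the tables
   filled above for this pair of initial conditions */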
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
*pk = ppm->amplitude[index_md][index_ic1_ic2]
*exp((ppm->tilt[index_md][index_ic1_ic2]-1.)*log(k/ppm->k_pivot)
+ 0.5 * ppm->running[index_md][index_ic1_ic2] * pow(log(k/ppm->k_pivot), 2.));
}
else {
*pk = 0.;
}
return _SUCCESS_;
}
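/* Minimal usage sketch (illustrative only, not part of the module API;
   it assumes index_md, index_ic1, index_ic2 and k have been set
   consistently with the tables filled above):

     double pk;
     int index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
     class_call(primordial_analytic_spectrum(ppm,index_md,index_ic1_ic2,k,&pk),
                ppm->error_message,
                ppm->error_message);
*/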
/**
* This routine encodes the inflaton scalar potential
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: background inflaton field value in units of Mp
* @param V Output: inflaton potential in units of \f$ Mp^4\f$
* @param dV Output: first derivative of inflaton potential wrt the field
* @param ddV Output: second derivative of inflaton potential wrt the field
* @return the error status
*/
int primordial_inflation_potential(
struct primordial * ppm,
double phi,
double * V,
double * dV,
double * ddV
) {
double e,de,dde,mu,dmu,ddmu,l,dl,ddl,p,dp,ddp;
switch (ppm->potential) {
/* V(phi)=polynomial in phi */
case polynomial:
*V = ppm->V0+phi*ppm->V1+pow(phi,2)/2.*ppm->V2+pow(phi,3)/6.*ppm->V3+pow(phi,4)/24.*ppm->V4;
*dV = ppm->V1+phi*ppm->V2+pow(phi,2)/2.*ppm->V3+pow(phi,3)/6.*ppm->V4;
*ddV = ppm->V2+phi*ppm->V3+pow(phi,2)/2.*ppm->V4;
break;
/* V(phi) = Lambda^4(1+cos(phi/f)) = V0 (1+cos(phi/V1)) */
case natural:
*V = ppm->V0*(1.+cos(phi/ppm->V1));
*dV = -ppm->V0/ppm->V1*sin(phi/ppm->V1);
*ddV = -ppm->V0/ppm->V1/ppm->V1*cos(phi/ppm->V1);
break;
/* Higgs inflation from arXiv:1403.6078 */
case higgs_inflation:
// correspondence with 1403.6078:
// V0 = b
// V1 = ksi
// V2 = kappa
// V3 = delta_lambda
// mu = bar(mu)/M_P
// phi = -chi/M_P
e = exp(2./sqrt(6.)*sqrt(8.*_PI_)*phi);
de = 2./sqrt(6.)*sqrt(8.*_PI_)*e;
dde = 2./3. * 8.*_PI_ * e;
mu = pow(1.-e,0.5);
dmu = -0.5*de*pow(1.-e,-0.5);
ddmu = -0.5*dde*pow(1.-e,-0.5)-0.25*de*de*pow(1.-e,-1.5);
l = log(mu/ppm->V2);
dl = dmu/mu;
ddl = ddmu/mu - dl*dl;
p = 1./16. + ppm->V3/ppm->V0 + l*l;
dp = 2.*dl*l;
ddp = 2.*ddl*l+2.*dl*dl;
*V = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*p*pow(mu,4);
*dV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(dp*pow(mu,4)+4.*p*dmu*pow(mu,3));
*ddV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(ddp*pow(mu,4)+8.*dp*dmu*pow(mu,3)+4.*p*ddmu*pow(mu,3)+12.*p*pow(dmu*mu,2));
//fprintf(stderr,"%e %e %e\n",*V,p,mu);
break;
/* code here other shapes */
default:
class_stop(ppm->error_message,"ppm->potential=%d different from all known cases",ppm->potential);
break;
}
return _SUCCESS_;
}
/**
* This routine encodes the function \f$ H(\phi)\f$
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: background inflaton field value in units of Mp
* @param H     Output: Hubble parameter in units of Mp
* @param dH Output: \f$ dH / d\phi \f$
* @param ddH Output: \f$ d^2H / d\phi^2 \f$
* @param dddH Output: \f$ d^3H / d\phi^3 \f$
* @return the error status
*/
int primordial_inflation_hubble(
struct primordial * ppm,
double phi,
double * H,
double * dH,
double * ddH,
double * dddH
) {
*H = ppm->H0 + phi*ppm->H1 + pow(phi,2)/2.*ppm->H2 + pow(phi,3)/6.*ppm->H3 + pow(phi,4)/24.*ppm->H4;
*dH = ppm->H1 + phi*ppm->H2 + pow(phi,2)/2.*ppm->H3 + pow(phi,3)/6.*ppm->H4;
*ddH = ppm->H2 + phi*ppm->H3 + pow(phi,2)/2.*ppm->H4;
*dddH = ppm->H3 + phi*ppm->H4;
return _SUCCESS_;
}
/**
* This routine defines indices used by the inflation simulator
*
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_inflation_indices(
struct primordial * ppm
) {
int index_in;
index_in = 0;
/* indices for background quantities */
ppm->index_in_a = index_in;
index_in ++;
ppm->index_in_phi = index_in;
index_in ++;
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
ppm->index_in_dphi = index_in;
index_in ++;
}
/* size of background vector */
ppm->in_bg_size = index_in;
/* indices for perturbations */
ppm->index_in_ksi_re = index_in;
index_in ++;
ppm->index_in_ksi_im = index_in;
index_in ++;
ppm->index_in_dksi_re = index_in;
index_in ++;
ppm->index_in_dksi_im = index_in;
index_in ++;
ppm->index_in_ah_re = index_in;
index_in ++;
ppm->index_in_ah_im = index_in;
index_in ++;
ppm->index_in_dah_re = index_in;
index_in ++;
ppm->index_in_dah_im = index_in;
index_in ++;
/* size of perturbation vector */
ppm->in_size = index_in;
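/* resulting layout of the running vector y[]:
   [ a, phi, (dphi), ksi_re, ksi_im, dksi_re, dksi_im, ah_re, ah_im, dah_re, dah_im ],
   where the dphi slot exists only for inflation_V / inflation_V_end;
   the first in_bg_size entries form the background sub-vector used by
   primordial_inflation_evolve_background() */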
return _SUCCESS_;
}
/**
* Main routine of inflation simulator. Its goal is to check the
* background evolution before and after the pivot value
* phi=phi_pivot, and then, if this evolution is suitable, to call the
* routine primordial_inflation_spectra().
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @return the error status
*/
int primordial_inflation_solve_inflation(
struct perturbs * ppt,
struct primordial * ppm,
struct precision *ppr
) {
/** Summary: */
/** - define local variables */
double * y;
double * y_ini;
double * dy;
double a_pivot;
double a_try;
double H_pivot;
double H_try;
double phi_try;
double dphidt_pivot;
double dphidt_try;
double aH_ini,aH_end;
double k_max,k_min;
int counter;
double dH,ddH,dddH;
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(y_ini,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - if needed (i.e. in the inflation_V_end case), first find phi_pivot */
if (ppm->primordial_spec_type == inflation_V_end) {
class_call(primordial_inflation_find_phi_pivot(ppm,ppr,y,dy),
ppm->error_message,
ppm->error_message);
}
else {
ppm->phi_pivot = 0.;
}
// uncomment these lines if, as a check, you want to print first-order slow-roll predictions
/*
if (ppm->primordial_verbose>0) {
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
double V,dV,ddV;
class_call(primordial_inflation_check_potential(ppm,ppm->phi_pivot,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
fprintf(stdout," -> 1st-order slow-roll prediction for A_s: %g\n",128.*_PI_/3.*pow(V,3)/pow(dV,2));
fprintf(stdout," -> 1st-order slow-roll prediction for T/S: %g\n",pow(dV/V,2)/_PI_);
fprintf(stdout," -> 1st-order slow-roll prediction for A_T: %g\n",pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2));
fprintf(stdout," -> 1st-order slow-roll prediction for n_s: %g\n",1.-6./16./_PI_*pow(dV/V,2)+2./8./_PI_*(ddV/V));
fprintf(stdout," -> 1st-order slow-roll prediction for n_t: %g\n",-2./16./_PI_*pow(dV/V,2));
}
}
*/
/** - compute H_pivot at phi_pivot */
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
/** - check positivity and negative slope of potential in field pivot
value, and find value of phi_dot and H for field's pivot value,
assuming slow-roll attractor solution has been reached. If no
solution, code will stop there. */
if (ppm->primordial_verbose > 1)
printf(" (search attractor at pivot)\n");
class_call_except(primordial_inflation_find_attractor(ppm,
ppr,
ppm->phi_pivot,
ppr->primordial_inflation_attractor_precision_pivot,
y,
dy,
&H_pivot,
&dphidt_pivot),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
break;
case inflation_H:
/** - check positivity and negative slope of \f$ H(\phi)\f$ in field pivot
value, and get H_pivot */
class_call_except(primordial_inflation_check_hubble(ppm,
ppm->phi_pivot,
&H_pivot,
&dH,
&ddH,
&dddH),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
break;
default:
free(y);free(y_ini);free(dy);
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
/** - find a_pivot, value of scale factor when k_pivot crosses horizon while phi=phi_pivot */
a_pivot = ppm->k_pivot/H_pivot;
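/* horizon crossing condition k_pivot = (aH)_pivot with H = H_pivot:
   this fixes the normalization of the scale factor */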
/** - integrate background solution starting from phi_pivot and until
    k_max<<aH. This ensures that the inflationary model considered
    here is valid and that the primordial spectrum can be
    computed. Otherwise, if slow-roll breaks too early, the model is not
    suitable and the run stops. */
if (ppm->primordial_verbose > 1)
printf(" (check inflation duration after phi_pivot=%e)\n",ppm->phi_pivot);
k_max = exp(ppm->lnk[ppm->lnk_size-1]);
aH_end = k_max/ppr->primordial_inflation_ratio_max;
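/* i.e. integrate until the ratio k_max/aH has dropped to primordial_inflation_ratio_max */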
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = a_pivot*dphidt_pivot;
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_end,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
/* We now need the opposite check: that there is an initial
   time such that k_min >> (aH)_ini. A guess is made by integrating
   backward in time. This can be done exactly for inflation_H, or
   only approximately for inflation_V (using the first-order
   approximation to the attractor inflationary solution). This
   approximation is however harmless: later on, we compute the
   attractor solution at the initial time with high accuracy, and
   then we integrate the background equations forward in time. Hence
   the approximation made here introduces no error in the final
   result; it is just a way to find quickly a reasonable initial phi
   value. In the inflation_V case, if the exact forward integration
   reveals that the guess was not good (i.e. does not correspond to
   "early enough"), we iterate over sequences of backward/forward
   integration, until a correct time is found. For potentials such
   that no solution exists (no long-enough slow-roll period before
   the pivot scale), the run stops. */
if (ppm->primordial_verbose > 1)
printf(" (check inflation duration before pivot)\n");
k_min = exp(ppm->lnk[0]);
aH_ini = k_min/ppr->primordial_inflation_ratio_min;
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
counter = 0;
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
do {
/* counter to avoid infinite loop */
counter ++;
class_test_except(counter >= ppr->primordial_inflation_phi_ini_maxit,
ppm->error_message,
free(y);free(y_ini);free(dy),
"when searching for an initial value of phi just before observable inflation takes place, could not converge after %d iterations. The potential does not allow eough inflationary e-folds before reaching the pivot scale",
counter);
/* try to find a value phi_try such that
aH=aH_ini*(ppr->primordial_inflation_aH_ini_target) (default:
aH_ini*0.9). But this is using the approximate backward
solution. So, anyway, we will check using the exact forward
solution that at this phi_try, we really have aH < aH_ini; if
this is not the case, we will iterate until a correct phi_try
is found. */
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_ini*ppr->primordial_inflation_aH_ini_target,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
phi_try = y[ppm->index_in_phi];
/* in inflation_V case, find the accurate attractor solution for
phi_ini', and then the correct value of a_ini, and finally of
dphi/dtau_ini */
/* find dphi/dt_ini (unlike dphi/dtau_ini, this does not depend on normalization of a) */
class_call_except(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
/* we need to normalize a properly so that a=a_pivot when
phi=phi_pivot. To do so, we evolve starting arbitrarily from
a_ini=1, and then we rescale a_ini appropriately. */
y[ppm->index_in_a] = 1.;
y[ppm->index_in_phi] = phi_try;
y[ppm->index_in_dphi] = y[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_pivot,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
/* now impose the correct a_ini */
a_try = a_pivot/y[ppm->index_in_a];
/* in case another iteration will be needed, set a new starting point for the routine primordial_inflation_evolve_background(...,backward) */
y[ppm->index_in_a] = a_try;
y[ppm->index_in_phi] = phi_try;
} while (a_try*H_try > aH_ini);
y_ini[ppm->index_in_a] = a_try;
y_ini[ppm->index_in_phi] = phi_try;
y_ini[ppm->index_in_dphi] = y_ini[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt
break;
case inflation_H:
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_ini,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
y_ini[ppm->index_in_a] = y[ppm->index_in_a];
y_ini[ppm->index_in_phi] = y[ppm->index_in_phi];
break;
default:
free(y);free(y_ini);free(dy);
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
/** - starting from this time, i.e. from y_ini[ ], we run the routine
which takes care of computing the primordial spectrum. */
if (ppm->primordial_verbose > 1)
printf(" (compute spectrum)\n");
if (ppm->behavior == numerical) {
class_call_except(primordial_inflation_spectra(ppt,
ppm,
ppr,
y_ini),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
}
else if (ppm->behavior == analytical) {
class_call_except(primordial_inflation_analytic_spectra(ppt,
ppm,
ppr,
y_ini),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
}
else {
class_stop(ppm->error_message,"Uncomprehensible value of the flag ppm->behavior=%d",ppm->behavior);
}
/** - before ending, we want to compute and store the values of \f$ \phi \f$
corresponding to k=aH for k_min and k_max */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k_min,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
ppm->phi_min=y[ppm->index_in_phi];
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k_max,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
ppm->phi_max=y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (observable power spectrum goes from %e to %e)\n",
ppm->phi_min,
ppm->phi_max);
/** - finally, we can de-allocate */
free(y);
free(y_ini);
free(dy);
return _SUCCESS_;
}
/**
* Routine for the computation of an analytic approximation to the
* primordial spectrum. In general, it should be used only for
* comparison with the exact numerical computation performed by
* primordial_inflation_spectra().
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @return the error status
*/
int primordial_inflation_analytic_spectra(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini
) {
double * y;
double * dy;
int index_k;
double k,phi_k;
double curvature,tensors;
double V,dV,ddV;
/** Summary */
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - initialize the background part of the running vector */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
/** - loop over Fourier wavenumbers */
for (index_k=0; index_k < ppm->lnk_size; index_k++) {
k = exp(ppm->lnk[index_k]);
/* evolve background until k=aH is reached */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/** - read value of phi at time when k=aH */
phi_k = y[ppm->index_in_phi];
/** - get potential (and its derivatives) at this value */
class_call(primordial_inflation_check_potential(ppm,phi_k,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
/** - calculate the analytic slow-roll formula for the spectra */
curvature = 128.*_PI_/3.*pow(V,3)/pow(dV,2);
tensors = pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2);
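/* in the units of this module (Mp=1), these are the standard first-order
   slow-roll results: with epsilon = (1/(16 pi)) (dV/V)^2 one has
   P_R = 128 pi/3 V^3/dV^2 = (8/3) V/epsilon and P_T = 16 epsilon P_R */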
/** - store the obtained result for curvature and tensor perturbations */
ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);
}
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
/**
* Routine with a loop over wavenumbers for the computation of the primordial
* spectrum. For each wavenumber it calls primordial_inflation_one_wavenumber()
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @return the error status
*/
int primordial_inflation_spectra(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini
) {
int index_k;
/* number of threads (always one if no openmp) */
int number_of_threads=1;
/* index of the thread (always 0 if no openmp) */
int thread=0;
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop, tspent;
#endif
#ifdef _OPENMP
#pragma omp parallel
{
number_of_threads = omp_get_num_threads();
}
#endif
abort = _FALSE_;
#pragma omp parallel shared(ppt,ppm,ppr,abort,y_ini) private(index_k,thread,tspent,tstart,tstop) num_threads(number_of_threads)
{
#ifdef _OPENMP
thread = omp_get_thread_num();
tspent=0.;
#endif
#pragma omp for schedule (dynamic)
/* loop over Fourier wavenumbers */
for (index_k=0; index_k < ppm->lnk_size; index_k++) {
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
class_call_parallel(primordial_inflation_one_wavenumber(ppt,ppm,ppr,y_ini,index_k),
ppm->error_message,
ppm->error_message);
#ifdef _OPENMP
tstop = omp_get_wtime();
tspent += tstop-tstart;
#endif
}
#ifdef _OPENMP
if (ppm->primordial_verbose>1)
printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
__func__,tspent,thread);
#endif
} /* end of parallel zone */
if (abort == _TRUE_) return _FAILURE_;
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
/**
* Routine coordinating the computation of the primordial
* spectrum for one wavenumber. It calls primordial_inflation_one_k() to
* integrate the perturbation equations, and then it stores the result
* for the scalar/tensor spectra.
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @param index_k Input: index of wavenumber to be considered
* @return the error status
*/
int primordial_inflation_one_wavenumber(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini,
int index_k
) {
double k;
double curvature,tensors;
double * y;
double * dy;
k = exp(ppm->lnk[index_k]);
/** Summary */
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - initialize the background part of the running vector */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
/** - evolve the background until the relevant initial time for
integrating perturbations */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k/ppr->primordial_inflation_ratio_min,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/** - evolve the background/perturbation equations from this time and
until some time after Horizon crossing */
class_call(primordial_inflation_one_k(ppm,
ppr,
k,
y,
dy,
&curvature,
&tensors),
ppm->error_message,
ppm->error_message);
free(y);
free(dy);
class_test(curvature<=0.,
ppm->error_message,
"negative curvature spectrum");
class_test(tensors<=0.,
ppm->error_message,
"negative tensor spectrum");
/** - store the obtained result for curvature and tensor perturbations */
ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);
/* uncomment if you want to print here the spectra for testing */
/* fprintf(stderr,"%e %e %e\n", */
/* ppm->lnk[index_k], */
/* ppm->lnpk[ppt->index_md_scalars][index_k], */
/* ppm->lnpk[ppt->index_md_tensors][index_k]); */
return _SUCCESS_;
}
/**
* Routine integrating the background plus perturbation equations for
* each wavenumber, and returning the scalar and tensor spectrum.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param k Input: Fourier wavenumber
* @param y Input: running vector of background/perturbations, already allocated and initialized
* @param dy Input: running vector of background/perturbation derivatives, already allocated
* @param curvature Output: curvature perturbation
* @param tensor Output: tensor perturbation
* @return the error status
*/
int primordial_inflation_one_k(
struct primordial * ppm,
struct precision * ppr,
double k,
double * y,
double * dy,
double * curvature,
double * tensor
) {
/** Summary: */
/** - define local variables */
double tau_start,tau_end,dtau;
double z,ksi2,ah2;
double aH;
double curvature_old;
double curvature_new;
double dlnPdN;
struct primordial_inflation_parameters_and_workspace pipaw;
struct generic_integrator_workspace gi;
/** - initialize the generic integrator (same integrator already used
in background, thermodynamics and perturbation modules) */
pipaw.ppm = ppm;
pipaw.N = ppm->in_size;
pipaw.integrate = forward;
pipaw.time = conformal;
pipaw.k = k;
class_call(initialize_generic_integrator(pipaw.N,&gi),
gi.error_message,
ppm->error_message);
/* initial conditions for the perturbations, Bunch-Davies vacuum */
y[ppm->index_in_ksi_re]=1./sqrt(2.*k);
y[ppm->index_in_ksi_im]=0.;
y[ppm->index_in_dksi_re]=0.;
y[ppm->index_in_dksi_im]=-k*y[ppm->index_in_ksi_re];
y[ppm->index_in_ah_re]=1./sqrt(2.*k);
y[ppm->index_in_ah_im]=0.;
y[ppm->index_in_dah_re]=0.;
y[ppm->index_in_dah_im]=-k*y[ppm->index_in_ah_re];
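/* these values follow from the Bunch-Davies mode function
   ksi_k(tau) = exp(-i k tau)/sqrt(2k): at the (arbitrary) initial time
   one has ksi = 1/sqrt(2k) and dksi/dtau = -i k ksi, split here into
   real and imaginary parts; identical initial conditions hold for the
   tensor variable ah */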
/** - initialize variable used for deciding when to stop the calculation (= when the curvature remains stable) */
curvature_new = _HUGE_;
/** - initialize conformal time to arbitrary value (here, only variations
of tau matter: the equations that we integrate do not depend
explicitly on time) */
tau_end = 0;
/** - compute derivative of initial vector and infer first value of adaptive time-step */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
/MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);
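/* the time step resolves a fixed fraction of the shortest oscillation
   period 2 pi/omega, where the frequency omega is estimated as the
   larger of sqrt(|ksi''/ksi|) and k */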
/** - loop over time */
do {
/* new time interval [tau_start, tau_end] over which equations will be integrated */
tau_start = tau_end;
tau_end = tau_start + dtau;
class_test(dtau/tau_start < ppr->smallest_allowed_variation,
ppm->error_message,
"integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);
/* evolve the system */
class_call(generic_integrator(primordial_inflation_derivs,
tau_start,
tau_end,
y,
&pipaw,
ppr->primordial_inflation_tol_integration,
ppr->smallest_allowed_variation,
&gi),
gi.error_message,
ppm->error_message);
/* compute derivatives at tau_end, useful to infer new time step and spectra */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
/* new time step */
dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
/MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);
/* new aH */
aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
/* store previous value of curvature (at tau_start) */
curvature_old = curvature_new;
/* new curvature */
z = y[ppm->index_in_a]*dy[ppm->index_in_phi]/aH;
ksi2 = y[ppm->index_in_ksi_re]*y[ppm->index_in_ksi_re]+y[ppm->index_in_ksi_im]*y[ppm->index_in_ksi_im];
curvature_new = k*k*k/2./_PI_/_PI_*ksi2/z/z;
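/* this is the curvature spectrum P_R(k) = k^3/(2 pi^2) |ksi_k|^2/z^2,
   with the Mukhanov normalization z = a (dphi/dtau)/(a'/a) = a (dphi/dt)/H */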
/* variation of curvature with time (dimensionless) */
dlnPdN = (curvature_new-curvature_old)/dtau*y[ppm->index_in_a]/dy[ppm->index_in_a]/curvature_new;
/* stop when (k << aH) AND curvature is stable */
} while ((k/aH >= ppr->primordial_inflation_ratio_max) || (fabs(dlnPdN) > ppr->primordial_inflation_tol_curvature));
/** - clean the generic integrator */
class_call(cleanup_generic_integrator(&gi),
gi.error_message,
ppm->error_message);
/** - store final value of curvature for this wavenumber */
*curvature = curvature_new;
/** - store final value of tensor perturbation for this wavenumber */
ah2 = y[ppm->index_in_ah_re]*y[ppm->index_in_ah_re]+y[ppm->index_in_ah_im]*y[ppm->index_in_ah_im];
*tensor = 32.*k*k*k/_PI_*ah2/y[ppm->index_in_a]/y[ppm->index_in_a];
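/* ah stores the product a*h, so ah2/a^2 = |h_k|^2; the prefactor
   32 k^3/pi matches the convention used elsewhere in this module,
   such that in slow roll tensor/curvature -> 16 epsilon (see
   primordial_inflation_analytic_spectra) */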
//fprintf(stdout,"%g %g %g %g %g\n",k,*curvature,*tensor,*tensor/(*curvature),dlnPdN);
return _SUCCESS_;
}
/**
* Routine searching for the inflationary attractor solution at a
* given phi_0, by iterations, with a given tolerance. If no solution is
* found within the tolerance, it returns an error message. The principle is the
* following. The code starts integrating the background equations
* from various values of phi, corresponding to earlier and earlier
* values before phi_0, and separated by a small arbitrary step size,
* corresponding roughly to 1 e-fold of inflation. Each time, the
* integration starts with the initial condition \f$ d\phi/dt=-V'/3H\f$ (slow-roll
* prediction). If the value of \f$\phi'\f$ found at phi_0 is stable (up to
* the parameter "precision"), the code considers that there is an
* attractor, and stops iterating. If this process does not converge,
* it returns an error message.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param phi_0 Input: field value at which we wish to find the solution
* @param precision  Input: tolerance on output values (if too large, an attractor will always be considered found)
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param H_0 Output: Hubble value at phi_0 for attractor solution
* @param dphidt_0 Output: field derivative value at phi_0 for attractor solution
* @return the error status
*/
int primordial_inflation_find_attractor(
struct primordial * ppm,
struct precision * ppr,
double phi_0,
double precision,
double * y,
double * dy,
double * H_0,
double * dphidt_0
) {
double V_0,dV_0,ddV_0;
double V=0.,dV=0.,ddV=0.;
double a;
double dphidt,dphidt_0new,dphidt_0old,phi;
int counter;
/* we want a series of values of phi' at phi_0, obtained by
   integrating the system from earlier and earlier times. The first
   value of the series is the slow-roll prediction phi' =
   -V'/3H. The following lines compute this value and initialize relevant quantities. */
class_call(primordial_inflation_check_potential(ppm,phi_0,&V_0,&dV_0,&ddV_0),
ppm->error_message,
ppm->error_message);
dphidt_0new = -dV_0/3./sqrt((8.*_PI_/3.)*V_0);
phi = phi_0;
counter = 0;
dphidt_0old = dphidt_0new/(precision+2.); // this silly value just
// ensures that the loop
// below will be executed
// at least once.
/* loop over different values of phi, from which the background
equations are integrated until phi_0 */
while (fabs(dphidt_0new/dphidt_0old-1.) >= precision) {
counter ++;
class_test(counter >= ppr->primordial_inflation_attractor_maxit,
ppm->error_message,
"could not converge after %d iterations: there exists no attractor solution near phi=%g. Potential probably too steep in this region, or precision parameter primordial_inflation_attractor_precision=%g too small",
counter,
phi_0,
precision);
dphidt_0old = dphidt_0new;
/* take one step in phi, corresponding roughly to adding one more
e-fold of inflation */
phi=phi+dV_0/V_0/16./_PI_;
/* fix the initial phi' to the slow-roll prediction in that point,
and initialize other relevant quantities */
class_call(primordial_inflation_check_potential(ppm,phi,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
a = 1.;
dphidt = -dV/3./sqrt((8.*_PI_/3.)*V);
y[ppm->index_in_a]=a;
y[ppm->index_in_phi]=phi;
y[ppm->index_in_dphi]=a*dphidt;
/* evolve the background equations until phi_0 is reached */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
phi_0,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/* compute phi' in phi_0, this is the new point in the series
which convergence we want to check */
dphidt_0new = y[ppm->index_in_dphi]/y[ppm->index_in_a];
}
/* if we have converged and found the attractor, we take the last
value of phi' in phi_0 to be the correct one for the attractor
solution */
*dphidt_0 = dphidt_0new;
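/* Friedmann equation H^2 = (8 pi/3)(phidot^2/2 + V), in units where Mp=1 */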
*H_0 = sqrt((8.*_PI_/3.)*(0.5*dphidt_0new*dphidt_0new+V_0));
if (ppm->primordial_verbose > 1) {
printf(" (attractor found in phi=%g with phi'=%g, H=%g)\n",phi_0,*dphidt_0,*H_0);
}
return _SUCCESS_;
}
/**
* Routine integrating background equations only, from initial values
* stored in y, to a final value (if target = _aH_, until aH =
* aH_stop; if target = _phi_, till phi = phi_stop; if target =
* _end_inflation_, until \f$ d^2a/dt^2 = 0\f$ (here t = proper time)). In
* output, y contains the final background values. In addition, if
* check_epsilon is true, the routine checks at each step that the
* expansion is accelerated and that inflation holds (epsilon<1),
* otherwise it returns an error. Thanks to the 'direction' argument, it is
* also possible to specify whether the integration should be carried
* forward or backward in time.
* order differential equation is involved, so the forward and
* backward case can be done exactly without problems. For the
* inflation_V case, the equation of motion is 2nd order. What the
* module will do in the backward case is to search for an approximate
* solution, corresponding to the (first-order) attractor inflationary
* solution. This approximate backward solution is used in order to
* estimate some initial times, but the approximation made here will
* never impact the final result: the module is written in such a way
* that after using this approximation, the code always computes (and
* relies on) the exact forward solution.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param target Input: whether the goal is to reach a given aH or \f$ \phi \f$
* @param stop Input: the target value of either aH or \f$ \phi \f$
* @param check_epsilon Input: whether we should impose inflation (epsilon<1) at each step
* @param direction Input: whether we should integrate forward or backward in time
* @param time Input: definition of time (proper or conformal)
* @return the error status
*/
int primordial_inflation_evolve_background(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy,
enum target_quantity target,
double stop,
short check_epsilon,
enum integration_direction direction,
enum time_definition time
) {
struct primordial_inflation_parameters_and_workspace pipaw;
struct generic_integrator_workspace gi;
double tau_start,tau_end,dtau=0.;
double H,dH,ddH,dddH;
double epsilon,epsilon_old;
double quantity=0.;
double V,dV,ddV;
double sign_dtau=0.;
pipaw.ppm = ppm;
pipaw.N = ppm->in_bg_size;
if ((direction == backward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
// -1 to remove the differential equation for phi', since we stick to the attractor
pipaw.N -= 1;
}
pipaw.integrate = direction;
pipaw.time = time;
switch (direction) {
case forward:
sign_dtau = 1.;
break;
case backward:
sign_dtau = -1.;
break;
}
class_call(initialize_generic_integrator(pipaw.N,&gi),
gi.error_message,
ppm->error_message);
/* at the starting point, compute epsilon if requested */
if (check_epsilon == _TRUE_) {
class_call(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message);
}
/* at starting point, compute the stepsize dtau */
tau_end = 0;
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
// in this case, the goal is to reach d2a/dt2 = 0 (end of accelerated expansion)
stop = 0.;
// current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
// check that we are in the right case
class_test(ppm->primordial_spec_type != inflation_V_end,
ppm->error_message,
"the target _end_inflation_ is only coded to work with inflation_V_end (but could be generalized if needed)");
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
/* loop over time steps, checking that there will be no overshooting */
while (sign_dtau*(quantity - stop) < 0.) {
/* check that V(phi) or H(phi) do not take forbidden values
(negative or positive derivative) */
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
class_call(primordial_inflation_check_potential(ppm,
y[ppm->index_in_phi],
&V,
&dV,
&ddV),
ppm->error_message,
ppm->error_message);
}
else {
class_call(primordial_inflation_check_hubble(ppm,
y[ppm->index_in_phi],
&H,
&dH,
&ddH,
&dddH),
ppm->error_message,
ppm->error_message);
}
/* take one time step */
tau_start = tau_end;
tau_end = tau_start + dtau;
// mind the fabs(...) below (works for both forward and backward integration)
class_test(fabs(dtau/tau_start) < ppr->smallest_allowed_variation,
ppm->error_message,
"integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);
class_call(generic_integrator(primordial_inflation_derivs,
tau_start,
tau_end,
y,
&pipaw,
ppr->primordial_inflation_tol_integration,
ppr->smallest_allowed_variation,
&gi),
gi.error_message,
ppm->error_message);
/* if requested, check that epsilon is not becoming greater than one */
if (check_epsilon == _TRUE_) {
epsilon_old = epsilon;
class_call_except(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message,
cleanup_generic_integrator(&gi));
class_test_except((epsilon > 1) && (epsilon_old <= 1),
ppm->error_message,
cleanup_generic_integrator(&gi),
"Inflaton evolution crosses the border from epsilon<1 to epsilon>1 at phi=%g. Inflation disrupted during the observable e-folds",
y[ppm->index_in_phi]);
}
/* recompute new value of next conformal time step */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
// current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
}
/* won't use the integrator anymore */
class_call(cleanup_generic_integrator(&gi),
gi.error_message,
ppm->error_message);
/* Perform one last step with a simple trapezoidal integral. This
will bring exactly phi or a forward to phi_stop or a_stop, or
approximately aH forward to aH_stop, or approximately [-d2a/dt2
/a] backward to zero. */
switch (target) {
case _aH_:
switch (time){
case proper:
dtau = (stop/dy[ppm->index_in_a]-1.)/dy[ppm->index_in_a];
break;
case conformal:
dtau = (stop/(dy[ppm->index_in_a]/y[ppm->index_in_a])-1.)/(dy[ppm->index_in_a]/y[ppm->index_in_a]);
break;
}
break;
case _phi_:
dtau = (stop-y[ppm->index_in_phi])/dy[ppm->index_in_phi];
break;
case _end_inflation_:
class_call(primordial_inflation_check_potential(ppm,y[ppm->index_in_phi],&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
// We can easily pull back quantity=-d2a/dt2 /a by noticing that
// d(quantity)/dtau = 8piG phi' phi'' / a^2 (exact relation!)
// or
// d(quantity)/dtau = 8piG phi^dot (a phi^dot)^dot = 8piG phi^dot (a^dot phi^dot+ a phi^dotdot)
// By taking the step dtau = - quantity / [d(quantity)/dtau] we nearly reach quantity=0 (end of inflation), up to very good approximation
switch (time){
case proper:
dtau = -quantity/(8.*_PI_*dy[ppm->index_in_phi]*(dy[ppm->index_in_a]*dy[ppm->index_in_phi]+y[ppm->index_in_a]*dy[ppm->index_in_dphi]));
break;
case conformal:
dtau = -quantity/(8.*_PI_/y[ppm->index_in_a]/y[ppm->index_in_a]*dy[ppm->index_in_phi]*dy[ppm->index_in_dphi]);
break;
}
break;
case _a_:
dtau = (stop-y[ppm->index_in_a])/dy[ppm->index_in_a];
break;
}
y[ppm->index_in_a] += dy[ppm->index_in_a]*dtau;
y[ppm->index_in_phi] += dy[ppm->index_in_phi]*dtau;
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V)||(ppm->primordial_spec_type == inflation_V_end)))
y[ppm->index_in_dphi] += dy[ppm->index_in_dphi]*dtau;
// this last step updates also the dy[]
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// uncomment if you want to test that the routine really reached the point at which d2a/dt2=0
/*
if (target == _end_inflation_) {
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
quantity = (-aH*aH + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi])/y[ppm->index_in_a]/y[ppm->index_in_a];
if (ppm->primordial_verbose>1)
printf(" (-d2a/dt2 /a = %e)\n",quantity);
}
*/
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of potential. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during inflation, so
* the slope had to be either always negative or always positive; we took
* the first option.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param V Output: inflaton potential in units of \f$ Mp^4\f$
* @param dV Output: first derivative of inflaton potential wrt the field
* @param ddV Output: second derivative of inflaton potential wrt the field
* @return the error status
*/
int primordial_inflation_check_potential(
struct primordial * ppm,
double phi,
double * V,
double * dV,
double * ddV
) {
class_call(primordial_inflation_potential(ppm,phi,V,dV,ddV),
ppm->error_message,
ppm->error_message);
class_test(*V <= 0.,
ppm->error_message,
"This potential becomes negative at phi=%g, before the end of observable inflation. It cannot be treated by this code",
phi);
class_test(*dV >= 0.,
ppm->error_message,
"All the code is written for the case dV/dphi<0. Here, in phi=%g, we have dV/dphi=%g. This potential cannot be treated by this code",
phi,*dV);
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of \f$ H(\phi)\f$. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during
* inflation, and H can only decrease with time. So the slope \f$ dH/d\phi\f$
* has to be either always negative or always positive; we took the first
* option: phi increases, H decreases.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param H     Output: Hubble parameter in units of Mp
* @param dH Output: \f$ dH / d\phi \f$
* @param ddH Output: \f$ d^2H / d\phi^2 \f$
* @param dddH Output: \f$ d^3H / d\phi^3 \f$
* @return the error status
*/
int primordial_inflation_check_hubble(
struct primordial * ppm,
double phi,
double * H,
double * dH,
double * ddH,
double * dddH
) {
class_call(primordial_inflation_hubble(ppm,
phi,
H,dH,ddH,dddH),
ppm->error_message,
ppm->error_message);
class_test(*H < 0.,
ppm->error_message,
"this H(phi) is not physical. H = %e",
*H);
class_test(*dH > 0.,
ppm->error_message,
"this H(phi) is not decreasing with growing phi. dH/dphi = %e",
*dH);
return _SUCCESS_;
}
/**
* Routine computing the first slow-roll parameter epsilon
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to compute epsilon
* @param epsilon Output: result
* @return the error status
*/
int primordial_inflation_get_epsilon(
struct primordial * ppm,
double phi,
double * epsilon
) {
double V,dV,ddV;
double H,dH,ddH,dddH;
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
class_call(primordial_inflation_potential(ppm,
phi,
&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
*epsilon = 1./16./_PI_*pow(dV/V,2);
//*eta = 1./8./pi*(ddV/V)
break;
case inflation_H:
class_call(primordial_inflation_hubble(ppm,
phi,
&H,&dH,&ddH,&dddH),
ppm->error_message,
ppm->error_message);
*epsilon = 1./4./_PI_*pow(dH/H,2);
break;
default:
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
return _SUCCESS_;
}
/**
* Routine searching for phi_pivot when a given amount of inflation is requested.
*
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @return the error status
*/
int primordial_inflation_find_phi_pivot(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy
) {
/** Summary: */
/** - define local variables */
double epsilon,dphi;
double phi_try,H_try,dphidt_try,ratio_try=0.;
double phi_left,phi_right,phi_mid;
double phi_small_epsilon,phi_stop;
double dphidt_small_epsilon;
double H_small_epsilon;
double aH_ratio_after_small_epsilon=0.;
double a_ratio_after_small_epsilon=0.;
double target=0.;
double a_pivot,aH_pivot;
double rho_end;
double h;
double H0;
double rho_c0;
double sigma_B;
double Omega_g0;
double Omega_r0;
/** - check whether, in the vicinity of phi_end, inflation is still ongoing */
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-ppr->primordial_inflation_end_dphi,&epsilon),
ppm->error_message,
ppm->error_message);
/** - case in which epsilon>1: hence we must find the value phi_stop <
phi_end where inflation ends up naturally */
if (epsilon > 1.) {
// assume that inflation ends up naturally
/** - --> find latest value of the field such that epsilon = primordial_inflation_small_epsilon (default: 0.1) */
/** - --> bracketing right-hand value is phi_end (but the potential will not be evaluated exactly there, only close by) */
phi_right = ppm->phi_end;
/** - --> bracketing left-hand value is found by iterating with logarithmic step until epsilon < primordial_inflation_small_epsilon */
dphi = ppr->primordial_inflation_end_dphi;
do {
dphi *= ppr->primordial_inflation_end_logstep;
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-dphi,&epsilon),
ppm->error_message,
ppm->error_message);
} while (epsilon > ppr->primordial_inflation_small_epsilon);
phi_left = ppm->phi_end-dphi;
/** - --> find value such that epsilon = primordial_inflation_small_epsilon by bisection */
do {
phi_mid = 0.5*(phi_left+phi_right);
class_call(primordial_inflation_get_epsilon(ppm,phi_mid,&epsilon),
ppm->error_message,
ppm->error_message);
if (epsilon < ppr->primordial_inflation_small_epsilon) phi_left=phi_mid;
else phi_right=phi_mid;
} while (fabs(epsilon-ppr->primordial_inflation_small_epsilon) > ppr->primordial_inflation_small_epsilon_tol);
/** - --> value found and stored as phi_small_epsilon */
phi_small_epsilon = phi_mid;
/** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_small_epsilon,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> compute amount of inflation between this phi_small_epsilon and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
y[ppm->index_in_dphi]=y[ppm->index_in_a]*dphidt_small_epsilon;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
// we have used here conformal time, so aH = dy[a]/y[a]
aH_ratio_after_small_epsilon = dy[ppm->index_in_a]/y[ppm->index_in_a]/H_small_epsilon;
a_ratio_after_small_epsilon = y[ppm->index_in_a];
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
/* get the target value of ln_aH_ratio */
rho_end = 2./8./_PI_*pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2);
rho_end = 8*_PI_/3.*rho_end/(_G_*_h_P_/pow(_c_,3))*pow(_Mpc_over_m_,2);
h = 0.7;
H0 = h * 1.e5 / _c_;
rho_c0 = pow(H0,2);
sigma_B = 2. * pow(_PI_,5) * pow(_k_B_,4) / 15. / pow(_h_P_,3) / pow(_c_,2);
Omega_g0 = (4.*sigma_B/_c_*pow(2.726,4.)) / (3.*_c_*_c_*1.e10*h*h/_Mpc_over_m_/_Mpc_over_m_/8./_PI_/_G_);
Omega_r0 = 3.046*7./8.*pow(4./11.,4./3.)*Omega_g0;
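/* standard estimate of ln(aH_end/aH_*) for a pivot scale
   k_* = 0.05/Mpc, assuming a fast transition to radiation domination
   after inflation; Omega_r0 counts photons at T=2.726 K plus 3.046
   effective neutrino species, and h=0.7 is a fiducial value,
   sufficient for this rough estimate */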
target = log(H0/0.05*pow(Omega_r0,0.5)*pow(2./100.,1./12.)*pow(rho_end/rho_c0,0.25));
//fprintf(stderr,"auto: log(aH_end/aH_*)=%e\n",target);
break;
case ln_aH_ratio:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(aH_end/aH_*)=%e\n",target);
break;
case N_star:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(a_end/a_*)=%e\n",target);
break;
}
/** - --> by starting from phi_small_epsilon and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/* we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/** - --> In verbose mode, check that phi_pivot is correct. Done by
    restarting from phi_pivot and going again till the end of
    inflation. */
aH_pivot = dy[ppm->index_in_a];
a_pivot = y[ppm->index_in_a];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
/** - case in which epsilon<1: */
else {
/** - --> find inflationary attractor at phi_end (it should exist, since we just checked that epsilon<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
ppm->phi_end,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> by starting from phi_end and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= ppm->phi_end;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/** - --> we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/** - --> In verbose mode, check that phi_pivot is correct. Done by
restarting from phi_pivot and going again till the end of
inflation. */
aH_pivot = dy[ppm->index_in_a];
a_pivot = y[ppm->index_in_a];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
return _SUCCESS_;
}
/**
* Routine returning derivative of system of background/perturbation
* variables. Like other routines used by the generic integrator
* (background_derivs, thermodynamics_derivs, perturb_derivs), this
* routine has a generic list of arguments, and a slightly different
* error management, with the error message returned directly in an
* ErrMsg field.
*
* @param tau Input: time (not used explicitly inside the routine, but requested by the generic integrator)
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param parameters_and_workspace Input: all necessary input variables apart from y
* @param error_message Output: error message
* @return the error status
*/
int primordial_inflation_derivs(
double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
struct primordial_inflation_parameters_and_workspace * ppipaw;
struct primordial * ppm;
ppipaw = parameters_and_workspace;
ppm = ppipaw->ppm;
// a2
ppipaw->a2=y[ppm->index_in_a]*y[ppm->index_in_a];
// BACKGROUND
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
class_call(primordial_inflation_potential(ppm,
y[ppm->index_in_phi],
&(ppipaw->V),
&(ppipaw->dV),
&(ppipaw->ddV)),
ppm->error_message,
ppm->error_message);
switch (ppipaw->integrate) {
case forward:
switch (ppipaw->time) {
case conformal:
// a H = a'/a
ppipaw->aH = sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->a2*ppipaw->V));
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
// 3: dphi/dtau
dy[ppm->index_in_dphi]=-2.*ppipaw->aH*y[ppm->index_in_dphi]-ppipaw->a2*ppipaw->dV;
break;
case proper:
// a H = adot
ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->V));
// 1: a
dy[ppm->index_in_a]=ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
// 3: dphi/dt
dy[ppm->index_in_dphi]=-3.*ppipaw->aH/y[ppm->index_in_a]*y[ppm->index_in_dphi]-ppipaw->dV;
break;
}
// z''/z (assumes that conformal time is requested)
ppipaw->zpp_over_z=
2*ppipaw->aH*ppipaw->aH
- ppipaw->a2*ppipaw->ddV
- 4.*_PI_*(7.*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]
+4.*y[ppm->index_in_dphi]/ppipaw->aH*ppipaw->a2*ppipaw->dV)
+32.*_PI_*_PI_*pow(y[ppm->index_in_dphi],4)/pow(ppipaw->aH,2);
// a''/a (assumes that conformal time is requested)
ppipaw->app_over_a=2.*ppipaw->aH*ppipaw->aH - 4.*_PI_*y[ppm->index_in_dphi]*y[ppm->index_in_dphi];
break;
// For backward integration of the approximate slow-roll solution:
// neglect the kinetic energy of the field, phi'^2/(2a^2), w.r.t. the potential energy V;
// neglect phi'' w.r.t. 2aH phi', reducing the second-order Klein-Gordon equation to an approximate first-order one
case backward:
switch (ppipaw->time) {
case conformal:
// a H = a'/a
ppipaw->aH = sqrt((8*_PI_/3.)*ppipaw->a2*ppipaw->V);
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]= -ppipaw->a2*ppipaw->dV/3./ppipaw->aH;
break;
case proper:
// a H = da/dt
ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*ppipaw->V);
// 1: a
dy[ppm->index_in_a]=ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]= -ppipaw->dV/3./ppipaw->aH*y[ppm->index_in_a];
break;
}
break;
}
break;
case inflation_H:
class_call(primordial_inflation_hubble(ppm,
y[ppm->index_in_phi],
&(ppipaw->H),
&(ppipaw->dH),
&(ppipaw->ddH),
&(ppipaw->dddH)),
ppm->error_message,
ppm->error_message);
switch (ppipaw->time) {
case conformal:
// 1: a
dy[ppm->index_in_a]=ppipaw->a2*ppipaw->H;
// 2: phi
dy[ppm->index_in_phi]=-1./4./_PI_*y[ppm->index_in_a]*ppipaw->dH;
break;
case proper:
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->H;
// 2: phi
dy[ppm->index_in_phi]=-1./4./_PI_*ppipaw->dH;
break;
}
// z''/z (assumes that conformal time is requested)
ppipaw->zpp_over_z =
2. *ppipaw->a2*ppipaw->H*ppipaw->H
-3./4./_PI_ *ppipaw->a2*ppipaw->H*ppipaw->ddH
+1./16./_PI_/_PI_*ppipaw->a2*ppipaw->ddH*ppipaw->ddH
+1./16./_PI_/_PI_*ppipaw->a2*ppipaw->dH*ppipaw->dddH
-1./4./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->ddH/ppipaw->H
+1./2./_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH
+1./8./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->dH*ppipaw->dH/ppipaw->H/ppipaw->H;
// a''/a (assumes that conformal time is requested)
ppipaw->app_over_a = 2.*ppipaw->a2*ppipaw->H*ppipaw->H
-4.*_PI_*dy[ppm->index_in_phi]*dy[ppm->index_in_phi];
break;
default:
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
if (ppipaw->N <= ppm->in_bg_size) // mind the <= instead of ==, necessary because for backward integration 1 equation is removed
return _SUCCESS_;
// PERTURBATIONS
class_test(ppipaw->time == proper,
ppm->error_message,
"For inflaton perturbations, only conformal time is coded.");
// SCALARS
// 4: ksi_re
dy[ppm->index_in_ksi_re]=y[ppm->index_in_dksi_re];
// 5: ksi_im
dy[ppm->index_in_ksi_im]=y[ppm->index_in_dksi_im];
// 6: d ksi_re / dtau
dy[ppm->index_in_dksi_re]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_re];
// 7: d ksi_im / dtau
dy[ppm->index_in_dksi_im]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_im];
// TENSORS
// 8: ah_re
dy[ppm->index_in_ah_re]=y[ppm->index_in_dah_re];
// 9: ah_im
dy[ppm->index_in_ah_im]=y[ppm->index_in_dah_im];
// 10: d ah_re / dtau
dy[ppm->index_in_dah_re]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_re];
// 11: d ah_im / dtau
dy[ppm->index_in_dah_im]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_im];
return _SUCCESS_;
}
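/* A minimal sketch of how a derivs routine with this generic signature can
   be driven by an explicit stepper. Illustrative only: the names tau_end,
   dtau, pipaw and errmsg are assumed to be set up by the caller, and the
   evolver actually used by CLASS performs adaptive stepping rather than
   the forward-Euler update shown here:

     double tau = 0.;
     while (tau < tau_end) {
       if (primordial_inflation_derivs(tau, y, dy, &pipaw, errmsg) != _SUCCESS_)
         return _FAILURE_;
       for (i = 0; i < ppm->in_bg_size; i++)
         y[i] += dtau*dy[i];    // one explicit Euler step
       tau += dtau;
     }
*/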
/**
* This routine reads the primordial spectrum from an external command,
* and stores the tabulated values.
* The sampling of the k's given by the external command is preserved.
*
* Author: Jesus Torrado (torradocacho@lorentz.leidenuniv.nl)
* Date: 2013-12-20
*
* @param ppt Input/output: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_external_spectrum_init(
struct perturbs * ppt,
struct primordial * ppm
) {
/** Summary: */
char arguments[_ARGUMENT_LENGTH_MAX_];
char line[_LINE_LENGTH_MAX_];
char command_with_arguments[2*_ARGUMENT_LENGTH_MAX_];
FILE *process;
int n_data_guess, n_data = 0;
double *k = NULL, *pks = NULL, *pkt = NULL, *tmp = NULL;
double this_k, this_pks, this_pkt;
int status;
int index_k;
/** - Initialization */
/* Prepare the data (with some initial size) */
n_data_guess = 100;
k = (double *)malloc(n_data_guess*sizeof(double));
pks = (double *)malloc(n_data_guess*sizeof(double));
if (ppt->has_tensors == _TRUE_)
pkt = (double *)malloc(n_data_guess*sizeof(double));
/* Prepare the command */
/* If the command is just a "cat", no arguments need to be passed */
if(strncmp("cat ", ppm->command, 4) == 0) {
sprintf(arguments, " ");
}
/* otherwise pass the list of arguments */
else {
sprintf(arguments, " %g %g %g %g %g %g %g %g %g %g",
ppm->custom1, ppm->custom2, ppm->custom3, ppm->custom4, ppm->custom5,
ppm->custom6, ppm->custom7, ppm->custom8, ppm->custom9, ppm->custom10);
}
/* write the actual command in a string */
sprintf(command_with_arguments, "%s %s", ppm->command, arguments);
if (ppm->primordial_verbose > 0)
printf(" -> running: %s\n",command_with_arguments);
/** - Launch the command and retrieve the output */
/* Launch the process */
process = popen(command_with_arguments, "r");
class_test(process == NULL,
ppm->error_message,
"The program failed to set the environment for the external command. Maybe you ran out of memory.");
/* Read output and store it */
while (fgets(line, sizeof(line)-1, process) != NULL) {
if (ppt->has_tensors == _TRUE_) {
sscanf(line, "%lf %lf %lf", &this_k, &this_pks, &this_pkt);
}
else {
sscanf(line, "%lf %lf", &this_k, &this_pks);
}
/* Standard technique in C: if the data outgrow the vectors, double their size */
/* (it is faster and safer than reallocating at every new line) */
if((n_data+1) > n_data_guess) {
n_data_guess *= 2;
tmp = (double *)realloc(k, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
k = tmp;
tmp = (double *)realloc(pks, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
pks = tmp;
if (ppt->has_tensors == _TRUE_) {
tmp = (double *)realloc(pkt, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
pkt = tmp;
};
};
/* Store */
k [n_data] = this_k;
pks[n_data] = this_pks;
if (ppt->has_tensors == _TRUE_) {
pkt[n_data] = this_pkt;
}
n_data++;
/* Check ascending order of the k's */
if(n_data>1) {
class_test(k[n_data-1] <= k[n_data-2],
ppm->error_message,
"The k's are not strictly sorted in ascending order, "
"as it is required for the calculation of the splines.\n");
}
}
/* Close the process */
status = pclose(process);
class_test(status != 0,
ppm->error_message,
"The external command did not terminate successfully. "
"Try running it by hand to check for errors.");
/* Test limits of the k's */
class_test(k[1] > ppt->k_min,
ppm->error_message,
"Your table for the primordial spectrum does not have "
"at least 2 points before the minimum value of k (%e). "
"The spline interpolation would not be safe.",ppt->k_min);
class_test(k[n_data-2] < ppt->k_max,
ppm->error_message,
"Your table for the primordial spectrum does not have "
"at least 2 points after the maximum value of k (%e). "
"The spline interpolation would not be safe.",ppt->k_max);
/** - Store the read results into CLASS structures */
ppm->lnk_size = n_data;
/** - Make room */
class_realloc(ppm->lnk,
ppm->lnk,
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->lnpk[ppt->index_md_scalars],
ppm->lnpk[ppt->index_md_scalars],
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->ddlnpk[ppt->index_md_scalars],
ppm->ddlnpk[ppt->index_md_scalars],
ppm->lnk_size*sizeof(double),
ppm->error_message);
if (ppt->has_tensors == _TRUE_) {
class_realloc(ppm->lnpk[ppt->index_md_tensors],
ppm->lnpk[ppt->index_md_tensors],
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->ddlnpk[ppt->index_md_tensors],
ppm->ddlnpk[ppt->index_md_tensors],
ppm->lnk_size*sizeof(double),
ppm->error_message);
};
/** - Store values */
for (index_k=0; index_k<ppm->lnk_size; index_k++) {
ppm->lnk[index_k] = log(k[index_k]);
ppm->lnpk[ppt->index_md_scalars][index_k] = log(pks[index_k]);
if (ppt->has_tensors == _TRUE_)
ppm->lnpk[ppt->index_md_tensors][index_k] = log(pkt[index_k]);
/* DEBUG (with tensors)
fprintf(stderr,"Storing[%d(+1) of %d]: \n k = %g == %g\n pks = %g == %g\n pkt = %g == %g\n",
index_k, n_data,
ppm->lnk[index_k], log(k[index_k]),
ppm->lnpk[ppt->index_md_scalars][index_k], log(pks[index_k]),
ppm->lnpk[ppt->index_md_tensors][index_k], log(pkt[index_k]));
*/
};
/** - Release the memory used locally */
free(k);
free(pks);
if (ppt->has_tensors == _TRUE_)
free(pkt);
/** - Tell CLASS that there are scalar (and tensor) modes */
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
if (ppt->has_tensors == _TRUE_)
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
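/* For reference, the external command read by the routine above is expected
   to print one line per wavenumber, in strictly ascending k, with two
   columns when only scalars are requested or three when tensors are too,
   matching the sscanf formats used above, i.e. "k  P_s(k)  [P_t(k)]".
   A purely illustrative example of such output:

     1.0e-5  2.2e-9  3.3e-10
     2.0e-5  2.2e-9  3.2e-10
     ...
*/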
int primordial_output_titles(struct perturbs * ppt,
struct primordial * ppm,
char titles[_MAXTITLESTRINGLENGTH_]
){
class_store_columntitle(titles,"k [1/Mpc]",_TRUE_);
class_store_columntitle(titles,"P_scalar(k)",_TRUE_);
class_store_columntitle(titles,"P_tensor(k)",ppt->has_tensors);
return _SUCCESS_;
}
int primordial_output_data(struct perturbs * ppt,
struct primordial * ppm,
int number_of_titles,
double *data){
int index_k, storeidx;
double *dataptr;
for (index_k=0; index_k<ppm->lnk_size; index_k++) {
dataptr = data + index_k*number_of_titles;
storeidx = 0;
class_store_double(dataptr, exp(ppm->lnk[index_k]), _TRUE_,storeidx);
class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_scalars][index_k]), _TRUE_,storeidx);
class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_tensors][index_k]), ppt->has_tensors,storeidx);
}
return _SUCCESS_;
}
|
testing_dtrsm.c | /**
*
* @file testing_dtrsm.c
*
* PLASMA testing routines
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mathieu Faverge
* @date 2010-11-15
* @generated d Tue Jan 7 11:45:18 2014
*
**/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <plasma.h>
#include <cblas.h>
#include <lapacke.h>
#include <core_blas.h>
#include "testing_dmain.h"
#undef COMPLEX
#define REAL
static int check_solution(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag,
int M, int N, double alpha,
double *A, int LDA,
double *Bref, double *Bplasma, int LDB);
int testing_dtrsm(int argc, char **argv)
{
/* Check for number of arguments*/
if ( argc != 5 ) {
USAGE("TRSM", "alpha M N LDA LDB",
" - alpha : alpha coefficient\n"
" - M : number of rows of matrices B\n"
" - N : number of columns of matrices B\n"
" - LDA : leading dimension of matrix A\n"
" - LDB : leading dimension of matrix B\n");
return -1;
}
double alpha = atof(argv[0]); /* atol would truncate non-integer alpha values */
int M = atoi(argv[1]);
int N = atoi(argv[2]);
int LDA = atoi(argv[3]);
int LDB = atoi(argv[4]);
double eps;
int info_solution;
int s, u, t, d, i;
int LDAxM = LDA*max(M,N);
int LDBxN = LDB*max(M,N);
double *A = (double *)malloc(LDAxM*sizeof(double));
#pragma omp register([LDAxM]A)
double *B = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]B)
double *Binit = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]Binit)
double *Bfinal = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]Bfinal)
/* Check if unable to allocate memory */
if ( (!A) || (!B) || (!Binit) || (!Bfinal)){
printf("Out of Memory \n ");
return -2;
}
eps = LAPACKE_dlamch_work('e');
printf("\n");
printf("------ TESTS FOR PLASMA DTRSM ROUTINE ------- \n");
printf(" Size of the Matrix B : %d by %d\n", M, N);
printf("\n");
printf(" The matrix A is randomly generated for each test.\n");
printf("============\n");
printf(" The relative machine precision (eps) is to be %e \n",eps);
printf(" Computational tests pass if scaled residuals are less than 10.\n");
/*----------------------------------------------------------
* TESTING DTRSM
*/
/* Initialize A, B, C */
LAPACKE_dlarnv_work(IONE, ISEED, LDAxM, A);
LAPACKE_dlarnv_work(IONE, ISEED, LDBxN, B);
for(i=0;i<max(M,N);i++)
A[LDA*i+i] = A[LDA*i+i] + 2.0;
for (s=0; s<2; s++) {
for (u=0; u<2; u++) {
#ifdef COMPLEX
for (t=0; t<3; t++) {
#else
for (t=0; t<2; t++) {
#endif
for (d=0; d<2; d++) {
memcpy(Binit, B, LDBxN*sizeof(double));
memcpy(Bfinal, B, LDBxN*sizeof(double));
/* PLASMA DTRSM */
PLASMA_dtrsm(side[s], uplo[u], trans[t], diag[d],
M, N, alpha, A, LDA, Bfinal, LDB);
/* Check the solution */
info_solution = check_solution(side[s], uplo[u], trans[t], diag[d],
M, N, alpha, A, LDA, Binit, Bfinal, LDB);
printf("***************************************************\n");
if (info_solution == 0) {
printf(" ---- TESTING DTRSM (%s, %s, %s, %s) ...... PASSED !\n",
sidestr[s], uplostr[u], transstr[t], diagstr[d]);
}
else {
printf(" ---- TESTING DTRSM (%s, %s, %s, %s) ... FAILED !\n",
sidestr[s], uplostr[u], transstr[t], diagstr[d]);
}
printf("***************************************************\n");
}
}
}
}
free(A); free(B);
free(Binit); free(Bfinal);
return 0;
}
/*--------------------------------------------------------------
* Check the solution
*/
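/* check_solution recomputes the triangular solve with the reference
   cblas_dtrsm, then forms the scaled residual
     ||B_plasma - B_lapack||_oo / ((||A||_oo + ||B_lapack||_oo) * max(M,N) * eps)
   and accepts the result when it is finite and below 10. */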
static int check_solution(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag,
int M, int N, double alpha,
double *A, int LDA,
double *Bref, double *Bplasma, int LDB)
{
int info_solution;
double Anorm, Binitnorm, Bplasmanorm, Blapacknorm, Rnorm, result;
double eps;
double mzone = (double)-1.0;
double *work = (double *)malloc(max(M, N)* sizeof(double));
int Am, An;
if (side == PlasmaLeft) {
Am = M; An = M;
} else {
Am = N; An = N;
}
Anorm = LAPACKE_dlantr_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), lapack_const(uplo), lapack_const(diag),
Am, An, A, LDA, work);
Binitnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
Bplasmanorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bplasma, LDB, work);
cblas_dtrsm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
(CBLAS_DIAG)diag, M, N, (alpha), A, LDA, Bref, LDB);
Blapacknorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
cblas_daxpy(LDB * N, (mzone), Bplasma, 1, Bref, 1);
Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
eps = LAPACKE_dlamch_work('e');
printf("Rnorm %e, Anorm %e, Binitnorm %e, Bplasmanorm %e, Blapacknorm %e\n",
Rnorm, Anorm, Binitnorm, Bplasmanorm, Blapacknorm);
result = Rnorm / ((Anorm + Blapacknorm) * max(M,N) * eps);
printf("============\n");
printf("Checking the norm of the difference against reference DTRSM \n");
printf("-- ||Cplasma - Clapack||_oo/((||A||_oo+||B||_oo).N.eps) = %e \n", result);
if ( isinf(Blapacknorm) || isinf(Bplasmanorm) || isnan(result) || isinf(result) || (result > 10.0) ) {
printf("-- The solution is suspicious ! \n");
info_solution = 1;
}
else {
printf("-- The solution is CORRECT ! \n");
info_solution= 0 ;
}
free(work);
return info_solution;
}
|
scheduled-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
char* printEnum (omp_sched_t type) {
char * ret;
if (type == omp_sched_static)
ret = "Static";
else if (type == omp_sched_dynamic)
ret = "Dynamic";
else if (type == omp_sched_guided)
ret = "Guided";
else if (type == omp_sched_auto)
ret = "Auto";
else
ret = "Unknown"; /* avoid returning an uninitialized pointer */
return ret;
}
int main(int argc, char **argv) {
int i, n=16,chunk,a[n],suma=0, printed = 0, chunk_read;
omp_sched_t sched_type;
if(argc < 2) {
fprintf(stderr,"\nFalta chunk \n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i=0; i<n; i++)
a[i] = i;
omp_get_schedule(&sched_type, &chunk_read);
printf("\n\tFuera del bucle:\n dyn var: %d\n nthreads-var: %d\n thread-limit-var: %d\n run-shed-var: %s --- %d\n\n",
omp_get_dynamic(), omp_get_max_threads(), omp_get_thread_limit(),
printEnum(sched_type), chunk_read);
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(dynamic,chunk)
for (i=0; i<n; i++)
{
#pragma omp critical
{
if (printed == 0) {
printed = 1;
omp_get_schedule(&sched_type, &chunk_read);
printf("\n\tDentro del bucle:\n dyn var: %d\n nthreads-var: %d\nthread-limit-var: %d\n run-shed-var: %s --- %d\n\n\n",
omp_get_dynamic(), omp_get_max_threads(), omp_get_thread_limit(),
printEnum(sched_type), chunk_read);
}
}
suma = suma + a[i];
printf(" thread %d suma a[%d]=%d suma=%d \n",
omp_get_thread_num(),i,a[i],suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
}
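/* Example build and run (assuming GCC; any OpenMP-capable compiler works):
     gcc -fopenmp scheduled-clauseModificado.c -o scheduled-clause
     ./scheduled-clause 2
*/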
|
otfft_avxdit4omp.h | // Copyright (c) 2015, OK おじさん(岡久卓也)
// Copyright (c) 2015, OK Ojisan(Takuya OKAHISA)
// Copyright (c) 2017 to the present, DEWETRON GmbH
// OTFFT Implementation Version 9.5
// based on Stockham FFT algorithm
// from OK Ojisan(Takuya OKAHISA), source: http://www.moon.sannet.ne.jp/okahisa/stockham/stockham.html
#pragma once
#include "otfft_misc.h"
namespace OTFFT_NAMESPACE {
namespace OTFFT_AVXDIT4omp { //////////////////////////////////////////////////
using namespace OTFFT;
using namespace OTFFT_MISC;
static const int AVX_THRESHOLD = 1<<10;
///////////////////////////////////////////////////////////////////////////////
// Forward Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
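// Each radix-4 DIT butterfly below combines a = y[0] with the twiddled
// inputs b = w1p*y[s], c = w2p*y[2s], d = w3p*y[3s] as
//   x[N0] = (a+c) + (b+d)
//   x[N1] = (a-c) - j(b-d)
//   x[N2] = (a+c) - (b+d)
//   x[N3] = (a-c) + j(b-d)
// where j is the imaginary unit applied by jxpz2; the inverse butterflies
// further below use conjugate twiddle factors and flip the sign of j(b-d).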
template <int n, int s> struct fwdcore
{
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/4;
static const int N2 = N1*2;
static const int N3 = N1*3;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static)
for (int i = 0; i < N/8; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s4p = 4*sp;
const ymm w1p = duppz3(W[1*sp]);
const ymm w2p = duppz3(W[2*sp]);
const ymm w3p = duppz3(W[3*sp]);
complex_vector xq_sp = x + q + sp;
complex_vector yq_s4p = y + q + s4p;
const ymm a = getpz2(yq_s4p+s*0);
const ymm b = mulpz2(w1p, getpz2(yq_s4p+s*1));
const ymm c = mulpz2(w2p, getpz2(yq_s4p+s*2));
const ymm d = mulpz2(w3p, getpz2(yq_s4p+s*3));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(xq_sp+N0, addpz2(apc, bpd));
setpz2(xq_sp+N1, subpz2(amc, jbmd));
setpz2(xq_sp+N2, subpz2(apc, bpd));
setpz2(xq_sp+N3, addpz2(amc, jbmd));
}
}
};
template <int N> struct fwdcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/4;
static const int N2 = N1*2;
static const int N3 = N1*3;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_4p = y + 4*p;
const ymm w1p = getpz2(W+p);
const ymm w2p = getwp2<2>(W,p);
const ymm w3p = getwp2<3>(W,p);
const ymm ab = getpz2(y_4p+0);
const ymm cd = getpz2(y_4p+2);
const ymm ef = getpz2(y_4p+4);
const ymm gh = getpz2(y_4p+6);
const ymm a = catlo(ab, ef);
const ymm b = mulpz2(w1p, cathi(ab, ef));
const ymm c = mulpz2(w2p, catlo(cd, gh));
const ymm d = mulpz2(w3p, cathi(cd, gh));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(x_p+N0, addpz2(apc, bpd));
setpz2(x_p+N1, subpz2(amc, jbmd));
setpz2(x_p+N2, subpz2(apc, bpd));
setpz2(x_p+N3, addpz2(amc, jbmd));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo, int mode> struct fwdend;
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct fwdend<4,s,eo,mode>
{
static const int N = 4*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm a = scalepz2<N,mode>(getpz2(zq+s*0));
const ymm b = scalepz2<N,mode>(getpz2(zq+s*1));
const ymm c = scalepz2<N,mode>(getpz2(zq+s*2));
const ymm d = scalepz2<N,mode>(getpz2(zq+s*3));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(xq+s*0, addpz2(apc, bpd));
setpz2(xq+s*1, subpz2(amc, jbmd));
setpz2(xq+s*2, subpz2(apc, bpd));
setpz2(xq+s*3, addpz2(amc, jbmd));
}
}
};
template <bool eo, int mode> struct fwdend<4,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm a = scalepz<4,mode>(getpz(z[0]));
const xmm b = scalepz<4,mode>(getpz(z[1]));
const xmm c = scalepz<4,mode>(getpz(z[2]));
const xmm d = scalepz<4,mode>(getpz(z[3]));
const xmm apc = addpz(a, c);
const xmm amc = subpz(a, c);
const xmm bpd = addpz(b, d);
const xmm jbmd = jxpz(subpz(b, d));
setpz(x[0], addpz(apc, bpd));
setpz(x[1], subpz(amc, jbmd));
setpz(x[2], subpz(apc, bpd));
setpz(x[3], addpz(amc, jbmd));
}
}
};
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct fwdend<2,s,eo,mode>
{
static const int N = 2*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm a = scalepz2<N,mode>(getpz2(zq+0));
const ymm b = scalepz2<N,mode>(getpz2(zq+s));
setpz2(xq+0, addpz2(a, b));
setpz2(xq+s, subpz2(a, b));
}
}
};
template <bool eo, int mode> struct fwdend<2,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm a = scalepz<2,mode>(getpz(z[0]));
const xmm b = scalepz<2,mode>(getpz(z[1]));
setpz(x[0], addpz(a, b));
setpz(x[1], subpz(a, b));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Forward FFT
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo, int mode> struct fwdfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwdfft<n/4,4*s,!eo,mode>()(y, x, W);
fwdcore<n,s>()(x, y, W);
}
};
template <int s, bool eo, int mode> struct fwdfft<4,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwdend<4,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct fwdfft<2,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwdend<2,s,eo,mode>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
template <int n, int s> struct invcore
{
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/4;
static const int N2 = N1*2;
static const int N3 = N1*3;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static)
for (int i = 0; i < N/8; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s4p = 4*sp;
const ymm w1p = duppz3(W[N-1*sp]);
const ymm w2p = duppz3(W[N-2*sp]);
const ymm w3p = duppz3(W[N-3*sp]);
complex_vector xq_sp = x + q + sp;
complex_vector yq_s4p = y + q + s4p;
const ymm a = getpz2(yq_s4p+s*0);
const ymm b = mulpz2(w1p, getpz2(yq_s4p+s*1));
const ymm c = mulpz2(w2p, getpz2(yq_s4p+s*2));
const ymm d = mulpz2(w3p, getpz2(yq_s4p+s*3));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(xq_sp+N0, addpz2(apc, bpd));
setpz2(xq_sp+N1, addpz2(amc, jbmd));
setpz2(xq_sp+N2, subpz2(apc, bpd));
setpz2(xq_sp+N3, subpz2(amc, jbmd));
}
}
};
template <int N> struct invcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/4;
static const int N2 = N1*2;
static const int N3 = N1*3;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_4p = y + 4*p;
const ymm w1p = cnjpz2(getpz2(W+p));
const ymm w2p = getwp2<-2>(W+N,p);
const ymm w3p = getwp2<-3>(W+N,p);
const ymm ab = getpz2(y_4p+0);
const ymm cd = getpz2(y_4p+2);
const ymm ef = getpz2(y_4p+4);
const ymm gh = getpz2(y_4p+6);
const ymm a = catlo(ab, ef);
const ymm b = mulpz2(w1p, cathi(ab, ef));
const ymm c = mulpz2(w2p, catlo(cd, gh));
const ymm d = mulpz2(w3p, cathi(cd, gh));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(x_p+N0, addpz2(apc, bpd));
setpz2(x_p+N1, addpz2(amc, jbmd));
setpz2(x_p+N2, subpz2(apc, bpd));
setpz2(x_p+N3, subpz2(amc, jbmd));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo, int mode> struct invend;
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct invend<4,s,eo,mode>
{
static const int N = 4*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm a = scalepz2<N,mode>(getpz2(zq+s*0));
const ymm b = scalepz2<N,mode>(getpz2(zq+s*1));
const ymm c = scalepz2<N,mode>(getpz2(zq+s*2));
const ymm d = scalepz2<N,mode>(getpz2(zq+s*3));
const ymm apc = addpz2(a, c);
const ymm amc = subpz2(a, c);
const ymm bpd = addpz2(b, d);
const ymm jbmd = jxpz2(subpz2(b, d));
setpz2(xq+s*0, addpz2(apc, bpd));
setpz2(xq+s*1, addpz2(amc, jbmd));
setpz2(xq+s*2, subpz2(apc, bpd));
setpz2(xq+s*3, subpz2(amc, jbmd));
}
}
};
template <bool eo, int mode> struct invend<4,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm a = scalepz<4,mode>(getpz(z[0]));
const xmm b = scalepz<4,mode>(getpz(z[1]));
const xmm c = scalepz<4,mode>(getpz(z[2]));
const xmm d = scalepz<4,mode>(getpz(z[3]));
const xmm apc = addpz(a, c);
const xmm amc = subpz(a, c);
const xmm bpd = addpz(b, d);
const xmm jbmd = jxpz(subpz(b, d));
setpz(x[0], addpz(apc, bpd));
setpz(x[1], addpz(amc, jbmd));
setpz(x[2], subpz(apc, bpd));
setpz(x[3], subpz(amc, jbmd));
}
}
};
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct invend<2,s,eo,mode>
{
static const int N = 2*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm a = scalepz2<N,mode>(getpz2(zq+0));
const ymm b = scalepz2<N,mode>(getpz2(zq+s));
setpz2(xq+0, addpz2(a, b));
setpz2(xq+s, subpz2(a, b));
}
}
};
template <bool eo, int mode> struct invend<2,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm a = scalepz<2,mode>(getpz(z[0]));
const xmm b = scalepz<2,mode>(getpz(z[1]));
setpz(x[0], addpz(a, b));
setpz(x[1], subpz(a, b));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse FFT
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo, int mode> struct invfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
invfft<n/4,4*s,!eo,mode>()(y, x, W);
invcore<n,s>()(x, y, W);
}
};
template <int s, bool eo, int mode> struct invfft<4,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
invend<4,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct invfft<2,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
invend<2,s,eo,mode>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Power of 2 FFT Routine
///////////////////////////////////////////////////////////////////////////////
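// The templates above unroll the whole transform at compile time: each
// fwdfft<n,s,eo,mode> / invfft<n,s,eo,mode> step recurses with n/4 and 4*s
// until it reaches the radix-4 or radix-2 base case, and the switch
// statements below simply dispatch the run-time log_N (sizes up to 2^24)
// to the matching pre-instantiated transform inside one parallel region.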
inline void fwd(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwd0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwdu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwdn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
fwd(log_N, x, y, W);
}
///////////////////////////////////////////////////////////////////////////////
inline void inv(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void inv0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
inv(log_N, x, y, W);
}
inline void invu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void invn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
} /////////////////////////////////////////////////////////////////////////////
}
|
core_zher2k.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_her2k
*
* Performs one of the Hermitian rank 2k operations
*
* \f[ C = \alpha A \times B^H + conjg( \alpha ) B \times A^H + \beta C, \f]
* or
* \f[ C = \alpha A^H \times B + conjg( \alpha ) B^H \times A + \beta C, \f]
*
* where alpha is a complex scalar, beta is a real scalar,
* C is an n-by-n Hermitian matrix, and A and B are n-by-k matrices
* in the first case and k-by-n matrices in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^H
* + conjg( \alpha ) B \times A^H + \beta C; \f]
* - PlasmaConjTrans:
* \f[ C = \alpha A^H \times B
* + conjg( \alpha ) B^H \times A + \beta C. \f]
*
* @param[in] n
*          The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A and B matrices;
* if trans = PlasmaConjTrans, number of rows of the A and B matrices.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* An lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaConjTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaConjTrans, lda >= max(1, k).
*
* @param[in] B
* An ldb-by-kb matrix.
* If trans = PlasmaNoTrans, kb = k;
* if trans = PlasmaConjTrans, kb = n.
*
* @param[in] ldb
* The leading dimension of the array B.
* If trans = PlasmaNoTrans, ldb >= max(1, n);
* if trans = PlasmaConjTrans, ldb >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* An ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
******************************************************************************/
void core_zher2k(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
const plasma_complex64_t *B, int ldb,
double beta, plasma_complex64_t *C, int ldc)
{
cblas_zher2k(CblasColMajor,
(CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
n, k,
CBLAS_SADDR(alpha), A, lda,
B, ldb,
beta, C, ldc);
}
/******************************************************************************/
void core_omp_zher2k(
plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
const plasma_complex64_t *B, int ldb,
double beta, plasma_complex64_t *C, int ldc,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
int bk;
if (trans == PlasmaNoTrans) {
ak = k;
bk = k;
}
else {
ak = n;
bk = n;
}
#pragma omp task depend(in:A[0:lda*ak]) \
depend(in:B[0:ldb*bk]) \
depend(inout:C[0:ldc*n])
{
if (sequence->status == PlasmaSuccess)
core_zher2k(uplo, trans,
n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc);
}
}
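/* A minimal usage sketch (assumed calling context, not part of this file):
   task-based kernels such as core_omp_zher2k are meant to be submitted from
   a single task-generating thread inside an active parallel region, e.g.

     plasma_sequence_t sequence;   // sequence.status assumed initialized
     plasma_request_t request;     // to PlasmaSuccess beforehand
     #pragma omp parallel
     #pragma omp master
     {
         core_omp_zher2k(PlasmaLower, PlasmaNoTrans, n, k,
                         alpha, A, lda, B, ldb, beta, C, ldc,
                         &sequence, &request);
     }

   so that the depend clauses above can order this update against other
   tasks reading or writing A, B, or C. */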
|
omp_section_private.c | <ompts:test>
<ompts:testdescription>Test which checks the omp section private clause by summing up a variable in a loop that is split into several sections.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp section private</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_section_private</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int sum;
int sum0;
int i;
</ompts:orphan:vars>
int known_sum;
sum = 7;
sum0 = 0;
#pragma omp parallel
{
<ompts:orphan>
#pragma omp sections <ompts:check>private(sum0,i)</ompts:check><ompts:crosscheck>private(i)</ompts:crosscheck>
{
#pragma omp section
{
sum0 = 0;
for (i = 1; i < 400; i++)
sum0 = sum0 + i;
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
sum0 = 0;
for (i = 400; i < 700; i++)
sum0 = sum0 + i;
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
sum0 = 0;
for (i = 700; i < 1000; i++)
sum0 = sum0 + i;
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of sections*/
</ompts:orphan>
} /* end of parallel */
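/* known_sum: 1 + 2 + ... + 999 = 999*1000/2, plus the initial value 7 of sum */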
known_sum = (999 * 1000) / 2 + 7;
return (known_sum == sum);
} /* end of check_section_private*/
</ompts:testcode>
</ompts:test>
|
matrix_openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
void fillMatrix(float *mat, int min, int max, int size) {
for (int i = 0; i < size; i++) {
mat[i] = (rand() % (max - min + 1)) + min; // uniform integer in [min, max]
}
}
void multiply() {
// create the matrices: two inputs that contain the numbers we want to multiply
// and a third to hold the result (note: at 50000x50000 floats, each matrix
// occupies about 10 GB, so these sizes require a very large-memory machine)
int row_matrix_1 = 50000; int row_matrix_2 = 50000;
int col_matrix_1 = 50000; int col_matrix_2 = 50000;
int row_matrix_3 = 50000; int col_matrix_3 = 50000;
float *matrix_one = (float*) malloc(sizeof(float)*row_matrix_1*col_matrix_1);
float *matrix_two = (float*) malloc(sizeof(float)*row_matrix_2*col_matrix_2);
float *matrix_three = (float*) malloc(sizeof(float)*row_matrix_3*col_matrix_3);
// seed the RNG once so the two input matrices get different random streams,
// then fill them with random numbers
srand(time(NULL));
fillMatrix(matrix_one, 1, 100, row_matrix_1 * col_matrix_1);
fillMatrix(matrix_two, 1, 100, row_matrix_2 * col_matrix_2);
// multiply the matrices: one parallel loop over the rows of the result;
// sum is local to each (i,j) pair, so threads never race on it
printf("Multiplying the matrices...\n");
#pragma omp parallel for
for(int i = 0; i < row_matrix_1; i++ ){
for(int j = 0; j < col_matrix_2; j++){
float sum = 0;
for(int k = 0; k < col_matrix_1; k++){
sum += matrix_one[i*col_matrix_1+k] * matrix_two[k*col_matrix_2+j];
}
matrix_three[i* col_matrix_3 + j]=sum;
}
}
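// An alternative (assuming OpenMP 3.0 or later) is to collapse the two outer
// loops into a single parallel iteration space, which balances load better
// when the row count is small relative to the thread count:
//
//   #pragma omp parallel for collapse(2)
//   for(int i = 0; i < row_matrix_1; i++ )
//       for(int j = 0; j < col_matrix_2; j++){ ... }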
printf("Multiplication done\n");
// free the memory allocation
free(matrix_one);
free(matrix_two);
free(matrix_three);
// This code is contributed by Manish Kumar (mkumar2789)
}
int main(void) {
// call the multiply function, timed with a wall-clock timer
// (clock() would sum CPU time across all OpenMP threads)
double start = omp_get_wtime();
multiply();
double end = omp_get_wtime();
printf("Time: %f seconds\n", end - start);
}
|
cvAdvDiff_bnd_omp.c | /* -----------------------------------------------------------------
* Programmer(s): Daniel Reynolds and Ting Yan @ SMU
* Based on cvAdvDiff_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2022, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* solved using CVODE.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the SUNBAND linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value or the number of threads from the
* environment value, run without arguments:
* % ./cvAdvDiff_bnd_omp
* The environment variable can be overridden with a command line
* argument specifying the number of threads to use, e.g:
* % ./cvAdvDiff_bnd_omp 5
* ----------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Header files with a description of contents */
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_openmp.h> /* OpenMP N_Vector types, fcts., macros */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* definition of type realtype */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants */
#define XMAX RCONST(2.0) /* domain boundaries */
#define YMAX RCONST(1.0)
#define MX 10 /* mesh dimensions */
#define MY 5
#define NEQ MX*MY /* number of equations */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/* User-defined vector access macro IJth */
/* IJth is defined in order to isolate the translation from the
mathematical 2-dimensional structure of the dependent variable vector
to the underlying 1-dimensional storage.
IJth(vdata,i,j) references the element in the vdata array for
u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY.
The vdata array is obtained via the macro call vdata = NV_DATA_S(v),
where v is an N_Vector.
The variables are ordered by the y index j, then by the x index i. */
#define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY])
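/* For example, with MY = 5 the reference IJth(vdata,2,3) expands to
   vdata[(3-1) + (2-1)*5] = vdata[7], i.e. u at mesh point (i=2, j=3). */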
/* Type : UserData (contains grid constants) */
typedef struct {
realtype dx, dy, hdcoef, hacoef, vdcoef;
int nthreads;
} *UserData;
/* Private Helper Functions */
static void SetIC(N_Vector u, UserData data);
static void PrintHeader(realtype reltol, realtype abstol, realtype umax);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
int main(int argc, char *argv[])
{
SUNContext sunctx;
realtype dx, dy, reltol, abstol, t, tout, umax;
N_Vector u;
UserData data;
SUNMatrix A;
SUNLinearSolver LS;
void *cvode_mem;
int iout, retval;
long int nst;
int num_threads;
u = NULL;
data = NULL;
A = NULL;
LS = NULL;
cvode_mem = NULL;
/* Create the SUNDIALS context */
retval = SUNContext_Create(NULL, &sunctx);
if(check_retval(&retval, "SUNContext_Create", 1)) return(1);
/* Set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* Create an OpenMP vector */
u = N_VNew_OpenMP(NEQ, num_threads, sunctx); /* Allocate u vector */
if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1);
reltol = ZERO; /* Set the tolerances */
abstol = ATOL;
data = (UserData) malloc(sizeof *data); /* Allocate data memory */
if(check_retval((void *)data, "malloc", 2)) return(1);
dx = data->dx = XMAX/(MX+1); /* Set grid coefficients in data */
dy = data->dy = YMAX/(MY+1);
data->hdcoef = ONE/(dx*dx);
data->hacoef = HALF/(TWO*dx);
data->vdcoef = ONE/(dy*dy);
data->nthreads = num_threads;
SetIC(u, data); /* Initialize u vector */
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula */
cvode_mem = CVodeCreate(CV_BDF, sunctx);
if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
retval = CVodeInit(cvode_mem, f, T0, u);
if(check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
retval = CVodeSStolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);
/* Set the pointer to user-defined data */
retval = CVodeSetUserData(cvode_mem, data);
if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);
/* Create banded SUNMatrix for use in linear solves -- since this will be factored,
set the storage bandwidth to be the sum of upper and lower bandwidths */
A = SUNBandMatrix(NEQ, MY, MY, sunctx);
if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
/* Create banded SUNLinearSolver object for use by CVode */
LS = SUNLinSol_Band(u, A, sunctx);
if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
/* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */
retval = CVodeSetLinearSolver(cvode_mem, LS, A);
if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);
/* Set the user-supplied Jacobian routine Jac */
retval = CVodeSetJacFn(cvode_mem, Jac);
if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1);
/* In loop over output points: call CVode, print results, test for errors */
umax = N_VMaxNorm(u);
PrintHeader(reltol, abstol, umax);
for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
if(check_retval(&retval, "CVode", 1)) break;
umax = N_VMaxNorm(u);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
PrintOutput(t, umax, nst);
}
PrintFinalStats(cvode_mem); /* Print some final statistics */
printf("num_threads = %i\n\n", num_threads);
N_VDestroy(u); /* Free the u vector */
CVodeFree(&cvode_mem); /* Free the integrator memory */
SUNLinSolFree(LS); /* Free the linear solver memory */
SUNMatDestroy(A); /* Free the matrix memory */
free(data); /* Free the user data */
SUNContext_Free(&sunctx);
return(0);
}
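/* Example build/run (illustrative only -- the source file name, library names
   and paths depend on the local SUNDIALS installation):
     cc -fopenmp cvAdvDiff_bnd_omp.c -lsundials_cvode -lsundials_nvecopenmp -lm
     ./a.out 4        (or OMP_NUM_THREADS=4 ./a.out)
   Either form runs the integration with 4 OpenMP threads. */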
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff;
realtype *udata, *dudata;
sunindextype i, j;
UserData data;
i = j = 0;
udata = NV_DATA_OMP(u);
dudata = NV_DATA_OMP(udot);
/* Extract needed constants from data */
data = (UserData) user_data;
hordc = data->hdcoef;
horac = data->hacoef;
verdc = data->vdcoef;
/* Loop over all grid points. */
#pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads)
for (j=1; j <= MY; j++) {
for (i=1; i <= MX; i++) {
/* Extract u at x_i, y_j and four neighboring points */
uij = IJth(udata, i, j);
udn = (j == 1) ? ZERO : IJth(udata, i, j-1);
uup = (j == MY) ? ZERO : IJth(udata, i, j+1);
ult = (i == 1) ? ZERO : IJth(udata, i-1, j);
urt = (i == MX) ? ZERO : IJth(udata, i+1, j);
/* Set diffusion and advection terms and load into udot */
hdiff = hordc*(ult - TWO*uij + urt);
hadv = horac*(urt - ult);
vdiff = verdc*(uup - TWO*uij + udn);
IJth(dudata, i, j) = hdiff + hadv + vdiff;
}
}
return(0);
}
/* Jacobian routine. Compute J(t,u). */
static int Jac(realtype t, N_Vector u, N_Vector fu,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
sunindextype i, j, k;
realtype *kthCol, hordc, horac, verdc;
UserData data;
/*
The components of f = udot that depend on u(i,j) are
f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with
df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2)
df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1)
df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX)
df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1)
df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY)
*/
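/* Consequently the nonzeros of column k lie in rows k, k-1, k+1, k-MY and
   k+MY, i.e. the Jacobian is banded with lower and upper half-bandwidths MY,
   matching the SUNBandMatrix(NEQ, MY, MY, sunctx) created in main(). */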
i = j = 0;
data = (UserData) user_data;
hordc = data->hdcoef;
horac = data->hacoef;
verdc = data->vdcoef;
#pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads)
for (j=1; j <= MY; j++) {
for (i=1; i <= MX; i++) {
k = j-1 + (i-1)*MY;
kthCol = SUNBandMatrix_Column(J,k);
/* set the kth column of J */
SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc);
if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac;
if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac;
if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc;
if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc;
}
}
return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Set initial conditions in u vector */
static void SetIC(N_Vector u, UserData data)
{
sunindextype i, j;
realtype x, y, dx, dy;
realtype *udata;
i = j = 0;
/* Extract needed constants from data */
dx = data->dx;
dy = data->dy;
/* Set pointer to data array in vector u. */
udata = NV_DATA_OMP(u);
/* Load initial profile into u vector */
#pragma omp parallel for default(shared) private(j, i, y, x)
for (j=1; j <= MY; j++) {
y = j*dy;
for (i=1; i <= MX; i++) {
x = i*dx;
IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*exp(FIVE*x*y);
}
}
}
/* Print first lines of output (problem description) */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax)
{
printf("\n2-D Advection-Diffusion Equation\n");
printf("Mesh dimensions = %d X %d\n", MX, MY);
printf("Total system size = %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n",
reltol, abstol);
printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: reltol = %g abstol = %g\n\n",
reltol, abstol);
printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
return;
}
/* Print current value */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
return;
}
/* Get and print some final statistics */
static void PrintFinalStats(void *cvode_mem)
{
int retval;
long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS;
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
retval = CVodeGetNumJacEvals(cvode_mem, &nje);
check_retval(&retval, "CVodeGetNumJacEvals", 1);
retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS);
check_retval(&retval, "CVodeGetNumLinRhsEvals", 1);
printf("\nFinal Statistics:\n");
printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n",
nst, nfe, nsetups, nfeLS, nje);
printf("nni = %-6ld ncfn = %-6ld netf = %ld\n",
nni, ncfn, netf);
return;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval < 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
}
|
DemBonesExt.h | ///////////////////////////////////////////////////////////////////////////////
// Dem Bones - Skinning Decomposition Library //
// Copyright (c) 2019, Electronic Arts. All rights reserved. //
///////////////////////////////////////////////////////////////////////////////
#ifndef DEM_BONES_EXT
#define DEM_BONES_EXT
#include "DemBones.h"
#include <Eigen/Geometry>
#ifndef DEM_BONES_MAT_BLOCKS
#include "MatBlocks.h"
#define DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#endif
namespace Dem
{
/** @class DemBonesExt DemBonesExt.h "DemBones/DemBonesExt.h"
@brief Extended class to handle hierarchical skeleton with local rotations/translations and bind matrices
@details Call computeRTB() to get local rotations/translations and bind matrices after skinning decomposition is done and other data is set.
@b _Scalar is the floating-point data type. @b _AniMeshScalar is the floating-point data type of mesh sequence #v.
*/
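/* A minimal usage sketch (assuming the base-class entry point is compute() from
   DemBones.h; every name here other than computeRTB() is an assumption):
       DemBonesExt<double, float> db;
       // ... fill db.u, db.v, db.fv, db.nV, db.nF, db.fStart, db.subjectID, db.nS, db.nB ...
       db.compute();  // run the skinning decomposition in the base class
       DemBonesExt<double, float>::MatrixX lr, lt, gb, lbr, lbt;
       db.computeRTB(0, lr, lt, gb, lbr, lbt);  // subject 0, angles in degrees
*/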
template<class _Scalar, class _AniMeshScalar>
class DemBonesExt: public DemBones<_Scalar, _AniMeshScalar> {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Matrix4=Eigen::Matrix<_Scalar, 4, 4>;
using Matrix3=Eigen::Matrix<_Scalar, 3, 3>;
using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;
using Vector4=Eigen::Matrix<_Scalar, 4, 1>;
using Vector3=Eigen::Matrix<_Scalar, 3, 1>;
using SparseMatrix=Eigen::SparseMatrix<_Scalar>;
using Triplet=Eigen::Triplet<_Scalar>;
using DemBones<_Scalar, _AniMeshScalar>::nIters;
using DemBones<_Scalar, _AniMeshScalar>::nInitIters;
using DemBones<_Scalar, _AniMeshScalar>::nTransIters;
using DemBones<_Scalar, _AniMeshScalar>::transAffine;
using DemBones<_Scalar, _AniMeshScalar>::transAffineNorm;
using DemBones<_Scalar, _AniMeshScalar>::nWeightsIters;
using DemBones<_Scalar, _AniMeshScalar>::nnz;
using DemBones<_Scalar, _AniMeshScalar>::weightsSmooth;
using DemBones<_Scalar, _AniMeshScalar>::weightsSmoothStep;
using DemBones<_Scalar, _AniMeshScalar>::weightEps;
using DemBones<_Scalar, _AniMeshScalar>::nV;
using DemBones<_Scalar, _AniMeshScalar>::nB;
using DemBones<_Scalar, _AniMeshScalar>::nS;
using DemBones<_Scalar, _AniMeshScalar>::nF;
using DemBones<_Scalar, _AniMeshScalar>::fStart;
using DemBones<_Scalar, _AniMeshScalar>::subjectID;
using DemBones<_Scalar, _AniMeshScalar>::u;
using DemBones<_Scalar, _AniMeshScalar>::w;
using DemBones<_Scalar, _AniMeshScalar>::m;
using DemBones<_Scalar, _AniMeshScalar>::v;
using DemBones<_Scalar, _AniMeshScalar>::fv;
using DemBones<_Scalar, _AniMeshScalar>::iter;
using DemBones<_Scalar, _AniMeshScalar>::iterTransformations;
using DemBones<_Scalar, _AniMeshScalar>::iterWeights;
//! Timestamps for bone transformations #m, [@c size] = #nF, #fTime(@p k) is the timestamp of frame @p k
Eigen::VectorXd fTime;
//! Name of bones, [@c size] = #nB, #boneName(@p j) is the name of bone @p j
std::vector<std::string> boneName;
//! Parent bone index, [@c size] = #nB, #parent(@p j) is the index of parent bone of @p j, #parent(@p j) = -1 if @p j has no parent.
Eigen::VectorXi parent;
//! Original bind pre-matrix, [@c size] = [4*#nS, 4*#nB], #bind.@a block(4*@p s, 4*@p j, 4, 4) is the global bind matrix of bone @p j on subject @p s at the rest pose
MatrixX bind;
//! Inverse pre-multiplication matrices, [@c size] = [4*#nS, 4*#nB], #preMulInv.@a block(4*@p s, 4*@p j, 4, 4) is the inverse of pre-local transformation of bone @p j on subject @p s
MatrixX preMulInv;
//! Rotation order, [@c size] = [3*#nS, #nB], #rotOrder.@a col(@p j).@a segment<3>(3*@p s) is the rotation order of bone @p j on subject @p s, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
Eigen::MatrixXi rotOrder;
//! Bind transformation update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity
int bindUpdate;
/** @brief Local rotations, translations and global bind matrices of a subject
@details Requires all data in the base class: #u, #fv, #nV, #v, #nF, #fStart, #subjectID, #nS, #m, #w, #nB
This function will initialize these default values for missing attributes:
- #parent: -1 vector, [@c size] = #nB
- #preMulInv: 4*4 identity matrix blocks, [@c size] = [4*#nS, 4*#nB]
- #rotOrder: {0, 1, 2} vector blocks, [@c size] = [3*#nS, #nB]
@param[in] s is the subject index
@param[out] lr is the [3*@p nFr, #nB] by-reference output local rotations, @p lr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j at frame @p k
@param[out] lt is the [3*@p nFr, #nB] by-reference output local translations, @p lt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j at frame @p k
@param[out] gb is the [4, 4*#nB] by-reference output global bind matrices, @p gb.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone j
@param[out] lbr is the [3, #nB] by-reference output local rotations at bind pose, @p lbr.@a col(@p j) is the (@c rx, @c ry, @c rz) of bone @p j
@param[out] lbt is the [3, #nB] by-reference output local translations at bind pose, @p lbt.@a col(@p j) is the (@c tx, @c ty, @c tz) of bone @p j
@param[in] degreeRot=true will output rotations in degree, otherwise output in radian
*/
void computeRTB(int s, MatrixX& lr, MatrixX& lt, MatrixX& gb, MatrixX& lbr, MatrixX& lbt, bool degreeRot=true) {
computeBind(s, gb);
if (parent.size()==0) parent=Eigen::VectorXi::Constant(nB, -1);
if (preMulInv.size()==0) preMulInv=MatrixX::Identity(4, 4).replicate(nS, nB);
if (rotOrder.size()==0) rotOrder=Eigen::Vector3i(0, 1, 2).replicate(nS, nB);
int nFs=fStart(s+1)-fStart(s);
lr.resize(nFs*3, nB);
lt.resize(nFs*3, nB);
lbr.resize(3, nB);
lbt.resize(3, nB);
#pragma omp parallel for
for (int j=0; j<nB; j++) {
Eigen::Vector3i ro=rotOrder.col(j).template segment<3>(s*3);
Matrix4 lb;
if (parent(j)==-1) lb=preMulInv.blk4(s, j)*gb.blk4(0, j);
else lb=preMulInv.blk4(s, j)*gb.blk4(0, parent(j)).inverse()*gb.blk4(0, j);
Vector3 curRot=Vector3::Zero();
toRot(lb.template topLeftCorner<3, 3>(), curRot, ro);
lbr.col(j)=curRot;
lbt.col(j)=lb.template topRightCorner<3, 1>();
Matrix4 lm;
for (int k=0; k<nFs; k++) {
if (parent(j)==-1) lm=preMulInv.blk4(s, j)*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
else lm=preMulInv.blk4(s, j)*(m.blk4(k+fStart(s), parent(j))*gb.blk4(0, parent(j))).inverse()*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
toRot(lm.template topLeftCorner<3, 3>(), curRot, ro);
lr.vec3(k, j)=curRot;
lt.vec3(k, j)=lm.template topRightCorner<3, 1>();
}
}
if (degreeRot) {
lr*=180/EIGEN_PI;
lbr*=180/EIGEN_PI;
}
}
private:
/** Compute centroids: translations set to p-norm centroids (using #transAffineNorm) and rotations to identity
@param s is the subject index
@param b is the [4, 4*#nB] by-reference output global bind matrices, @p b.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
*/
void computeCentroids(int s, MatrixX& b) {
MatrixX c=MatrixX::Zero(4, nB);
for (int i=0; i<nV; i++)
for (typename SparseMatrix::InnerIterator it(w, i); it; ++it)
c.col(it.row())+=pow(it.value(), transAffineNorm)*u.vec3(s, i).homogeneous();
b=MatrixX::Identity(4, 4).replicate(1, nB);
for (int j=0; j<nB; j++)
if (c(3, j)!=0) b.transVec(0, j)=c.col(j).template head<3>()/c(3, j);
}
/** Global bind pose
@param s is the subject index
@param bindUpdate is the type of bind pose update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity
@param b is the [4, 4*#nB] by-reference output global bind matrices, @p b.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
*/
void computeBind(int s, MatrixX& b) {
if (bind.size()==0) {
bind.resize(nS*4, nB*4);
for (int k=0; k<nS; k++) {
computeCentroids(k, b);
bind.block(4*k, 0, 4, 4*nB)=b;
}
}
switch (bindUpdate) {
case 0: b=bind.block(4*s, 0, 4, 4*nB); break;
case 1: computeCentroids(s, b); break;
}
}
/** Euler angles from rotation matrix
@param rMat is the 3*3 rotation matrix
@param curRot is the input current Euler angles; it is also the by-reference output closest Euler angles corresponding to @p rMat
@param ro is the rotation order, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
@param eps is the epsilon
*/
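/* Implementation note: Euler angle triples are not unique -- negating an
   extracted angle and/or shifting it by a multiple of pi can reproduce the
   same rotation matrix. The nested loops below enumerate r(i) = +/-r0(i) + k*pi
   for k in [-2, 2], rebuild each candidate as a matrix, and keep the candidate
   closest (in squared norm) to curRot, which keeps animation curves continuous
   across frames. */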
void toRot(const Matrix3& rMat, Vector3& curRot, const Eigen::Vector3i& ro, _Scalar eps=_Scalar(1e-10)) {
Vector3 r0=rMat.eulerAngles(ro(2), ro(1), ro(0)).reverse();
_Scalar gMin=(r0-curRot).squaredNorm();
Vector3 rMin=r0;
Vector3 r;
Matrix3 tmpMat;
for (int fx=-1; fx<=1; fx+=2)
for (_Scalar sx=-2*EIGEN_PI; sx<2.1*EIGEN_PI; sx+=EIGEN_PI) {
r(0)=fx*r0(0)+sx;
for (int fy=-1; fy<=1; fy+=2)
for (_Scalar sy=-2*EIGEN_PI; sy<2.1*EIGEN_PI; sy+=EIGEN_PI) {
r(1)=fy*r0(1)+sy;
for (int fz=-1; fz<=1; fz+=2)
for (_Scalar sz=-2*EIGEN_PI; sz<2.1*EIGEN_PI; sz+=EIGEN_PI) {
r(2)=fz*r0(2)+sz;
tmpMat=Matrix3(Eigen::AngleAxis<_Scalar>(r(ro(2)), Vector3::Unit(ro(2))))*
Eigen::AngleAxis<_Scalar>(r(ro(1)), Vector3::Unit(ro(1)))*
Eigen::AngleAxis<_Scalar>(r(ro(0)), Vector3::Unit(ro(0)));
if ((tmpMat-rMat).squaredNorm()<eps) {
_Scalar tmp=(r-curRot).squaredNorm();
if (tmp<gMin) {
gMin=tmp;
rMin=r;
}
}
}
}
}
curRot=rMin;
}
};
}
#ifdef DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#undef blk4
#undef rotMat
#undef transVec
#undef vec3
#undef DEM_BONES_MAT_BLOCKS
#endif
#undef rotMatFromEuler
#endif
|
trsm_x_csr_u_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = m - 1; r >= 0; r--)
{
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
{
ALPHA_INT ac = A->col_indx[ai];
if (ac > r)
{
alpha_madde(temp, A->values[ai], y[ac * ldy + out_y_col]);
}
}
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sub(y[r * ldy + out_y_col], t, temp);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
convolution_sgemm_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#endif
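    // Tiling note (editorial, describing the create() calls above): the im2col
    // matrix is repacked so tiles of 12/8/4/2/1 columns (8/4/2/1 on armv7) are
    // contiguous, matching the widths of the micro-kernels below. The channel
    // index of a tile inside tmp is the running count of tiles before it,
    // e.g. i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + ...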
{
#if __aarch64__
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
unsigned short* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size * 4;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#else
int nn_size = size >> 3;
int remain_size_start = 0;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.u16 {d0-d1}, [%0 :128] \n"
"vst1.u16 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += size * 4;
}
}
}
}
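    // Compute stage (behavior inferred from the asm): bf16 activations and
    // weights are widened to fp32 with shll #16 (bf16 is the high half of an
    // fp32 bit pattern), accumulated with fmla on top of the broadcast bias,
    // then truncated back to bf16 with shrn #16 before the store. The aarch64
    // path below emits two output channels per iteration; armv7 emits one.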
int remain_outch_start = 0;
#if __aarch64__
int nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w2233_01
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < size; i += 2)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r0 r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < size; i++)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r0
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
const unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // w0123_0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vshrn.u32 d20, q12, #16 \n"
"vshrn.u32 d21, q13, #16 \n"
"vshrn.u32 d22, q14, #16 \n"
"vshrn.u32 d23, q15, #16 \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vst1.u16 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < size; i += 2)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r0 r1
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.u16 {d4-d5}, [%2 :128]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < size; i++)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r0
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack4_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 4b-4a-maxk-inch/4a-outch/4b
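// e.g. in the 8-outch branch below, each k writes 32 values: outch q+0..q+7
// for inch p+0, then the same eight for p+1, p+2, p+3 (outch innermost).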
Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __aarch64__
kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4, 2u);
#else
kernel_tm.create(16 * maxk, inch / 4, outch / 4, 2u);
#endif
int q = 0;
#if __aarch64__
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
const Mat k4 = kernel.channel(q + 4);
const Mat k5 = kernel.channel(q + 5);
const Mat k6 = kernel.channel(q + 6);
const Mat k7 = kernel.channel(q + 7);
unsigned short* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k40[k]);
g00[5] = float32_to_bfloat16(k50[k]);
g00[6] = float32_to_bfloat16(k60[k]);
g00[7] = float32_to_bfloat16(k70[k]);
g00[8] = float32_to_bfloat16(k01[k]);
g00[9] = float32_to_bfloat16(k11[k]);
g00[10] = float32_to_bfloat16(k21[k]);
g00[11] = float32_to_bfloat16(k31[k]);
g00[12] = float32_to_bfloat16(k41[k]);
g00[13] = float32_to_bfloat16(k51[k]);
g00[14] = float32_to_bfloat16(k61[k]);
g00[15] = float32_to_bfloat16(k71[k]);
g00[16] = float32_to_bfloat16(k02[k]);
g00[17] = float32_to_bfloat16(k12[k]);
g00[18] = float32_to_bfloat16(k22[k]);
g00[19] = float32_to_bfloat16(k32[k]);
g00[20] = float32_to_bfloat16(k42[k]);
g00[21] = float32_to_bfloat16(k52[k]);
g00[22] = float32_to_bfloat16(k62[k]);
g00[23] = float32_to_bfloat16(k72[k]);
g00[24] = float32_to_bfloat16(k03[k]);
g00[25] = float32_to_bfloat16(k13[k]);
g00[26] = float32_to_bfloat16(k23[k]);
g00[27] = float32_to_bfloat16(k33[k]);
g00[28] = float32_to_bfloat16(k43[k]);
g00[29] = float32_to_bfloat16(k53[k]);
g00[30] = float32_to_bfloat16(k63[k]);
g00[31] = float32_to_bfloat16(k73[k]);
g00 += 32;
}
}
}
#endif // __aarch64__
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
#if __aarch64__
unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
unsigned short* g00 = kernel_tm.channel(q / 4);
#endif
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k01[k]);
g00[5] = float32_to_bfloat16(k11[k]);
g00[6] = float32_to_bfloat16(k21[k]);
g00[7] = float32_to_bfloat16(k31[k]);
g00[8] = float32_to_bfloat16(k02[k]);
g00[9] = float32_to_bfloat16(k12[k]);
g00[10] = float32_to_bfloat16(k22[k]);
g00[11] = float32_to_bfloat16(k32[k]);
g00[12] = float32_to_bfloat16(k03[k]);
g00[13] = float32_to_bfloat16(k13[k]);
g00[14] = float32_to_bfloat16(k23[k]);
g00[15] = float32_to_bfloat16(k33[k]);
g00 += 16;
}
}
}
}
static void convolution_im2col_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
{
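// gap: packed (x4) elements to skip from the end of one output row's samples
// to the start of the next; the inner loops consume outw*stride_w*4 elements
// per row, while consecutive sampled input rows are w*stride_h*4 apart.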
const int gap = (w * stride_h - outw * stride_w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
unsigned short* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const unsigned short* sptr = img.row<const unsigned short>(dilation_h * u) + dilation_w * v * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
uint16x4_t _val0 = vld1_u16(sptr);
uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
uint16x4_t _val2 = vld1_u16(sptr + stride_w * 8);
uint16x4_t _val3 = vld1_u16(sptr + stride_w * 12);
vst1_u16(ptr, _val0);
vst1_u16(ptr + 4, _val1);
vst1_u16(ptr + 8, _val2);
vst1_u16(ptr + 12, _val3);
sptr += stride_w * 16;
ptr += 16;
}
for (; j + 1 < outw; j += 2)
{
uint16x4_t _val0 = vld1_u16(sptr);
uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
vst1_u16(ptr, _val0);
vst1_u16(ptr + 4, _val1);
sptr += stride_w * 8;
ptr += 8;
}
for (; j < outw; j++)
{
uint16x4_t _val = vld1_u16(sptr);
vst1_u16(ptr, _val);
sptr += stride_w * 4;
ptr += 4;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
burgers1d_b.c | /* Generated by TAPENADE (INRIA, Ecuador team)
Tapenade 3.14 (r7079) - 5 Oct 2018 09:55
*/
#include <adBuffer.h>
/*
Differentiation of fmax in reverse (adjoint) mode:
gradient of useful results: fmax b
with respect to varying inputs: b
*/
void fmax_b(double a, double b, double *bb, double fmaxb) {
if (a <= b)
*bb = *bb + fmaxb;
}
double fmax_nodiff(double a, double b) {
if (a > b)
return a;
else
return b;
}
/*
Differentiation of fmin in reverse (adjoint) mode:
gradient of useful results: fmin b
with respect to varying inputs: b
*/
void fmin_b(double a, double b, double *bb, double fminb) {
if (a >= b)
*bb = *bb + fminb;
}
double fmin_nodiff(double a, double b) {
if (a < b)
return a;
else
return b;
}
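/* Adjoint rule used above: for z = fmax(a, b), dz/db = 1 when b is selected
(a <= b) and 0 otherwise, so the reverse routine adds the incoming adjoint
(fmaxb) into bb only in that branch; fmin_b mirrors this with a >= b. The
first argument is a constant at every call site, so no ab accumulator is
generated. */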
/*
Differentiation of burgers1d in reverse (adjoint) mode:
gradient of useful results: *u *u_1
with respect to varying inputs: *u *u_1
RW status of diff variables: *u:in-out *u_1:incr
Plus diff mem management of: u:in u_1:in
*/
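/* For reference (inferred from the adjoint statements below; the primal code
is not part of this file): the derivatives are consistent with an update of
the form
u[i] = u_1[i]
- C*(result2*(u_1[i] - u_1[i-1]) + result1*(u_1[i+1] - u_1[i]))
+ D*(u_1[i+1] - 2.0*u_1[i] + u_1[i-1]);
with result1 = fmin(0, u_1[i]) and result2 = fmax(0, u_1[i]),
i.e. upwinded advection plus diffusion for the 1-D Burgers equation. */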
void burgers1d_b(double *u, double *ub, double *u_1, double *u_1b, double D,
double C, int n) {
int i;
double result1;
double result1b;
double result2;
double result2b;
double tempb;
double tempb0;
//#pragma omp parallel for private(i)
for (i = 1; i < n-1; ++i) {
pushReal8(result1);
result1 = fmin_nodiff(0, u_1[i]);
pushReal8(result2);
result2 = fmax_nodiff(0, u_1[i]);
}
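/* Reverse sweep. Each iteration scatters into u_1b[i-1], u_1b[i] and
u_1b[i+1], so neighboring iterations of the parallel loop touch the same
entries; the atomic updates below make those accumulations safe. */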
#pragma omp parallel for private(i, tempb, tempb0, result1b, result2b)
for (i = n-2; i > 0; --i) {
tempb = -(C*ub[i]);
tempb0 = D*ub[i];
#pragma omp atomic
u_1b[i + 1] = u_1b[i + 1] + tempb0 + result1*tempb;
#pragma omp atomic
u_1b[i] = u_1b[i] + ub[i] - 2.0*tempb0 + (result2-result1)*tempb;
result1b = (u_1[i+1]-u_1[i])*tempb;
#pragma omp atomic
u_1b[i - 1] = u_1b[i - 1] + tempb0 - result2*tempb;
result2b = (u_1[i]-u_1[i-1])*tempb;
popReal8(&result2);
tempb = 0.0; /* reset the staging adjoint before fmax_b accumulates into it */
fmax_b(0, u_1[i], &(tempb), result2b);
#pragma omp atomic
u_1b[i] += tempb;
popReal8(&result1);
tempb = 0.0; /* reset again before fmin_b */
fmin_b(0, u_1[i], &(tempb), result1b);
#pragma omp atomic
u_1b[i] += tempb;
}
}
|
GB_binop__lor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_int8
// A.*B function (eWiseMult): GB_AemultB__lor_int8
// A*D function (colscale): GB_AxD__lor_int8
// D*A function (rowscale): GB_DxB__lor_int8
// C+=B function (dense accum): GB_Cdense_accumB__lor_int8
// C+=b function (dense accum): GB_Cdense_accumb__lor_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int8
// C=scalar+B GB_bind1st__lor_int8
// C=scalar+B' GB_bind1st_tran__lor_int8
// C=A+scalar GB_bind2nd__lor_int8
// C=A'+scalar GB_bind2nd_tran__lor_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) || (y != 0)) ;
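// The result is computed in int8_t but is always 0 or 1: the operator tests
// only the nonzero pattern of its operands.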
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lor_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__lor_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__lor_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__lor_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__lor_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__lor_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_bool
// op(A') function: GB_tran__lnot_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
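// For this operator/type pair, GB_CAST_OP(pC,pA) expands to:
// bool aij = Ax [pA] ; uint16_t x = (uint16_t) aij ; Cx [pC] = !(x != 0) ;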
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_bool
(
uint16_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
FormulaDemandCalculator.h | #pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include "Algorithms/Dijkstra/Dijkstra.h"
#include "DataStructures/Graph/Attributes/TravelTimeAttribute.h"
#include "DataStructures/Graph/Graph.h"
#include "DataStructures/Labels/BasicLabelSet.h"
#include "DataStructures/Labels/ParentInfo.h"
#include "Tools/CommandLine/ProgressBar.h"
#include "Tools/Constants.h"
#include "Tools/OpenMP.h"
#include "Tools/Timer.h"
// A travel demand calculator based on evaluating the radiation model's formula for each pair of
// vertices. For each vertex s in the graph, we run a Dijkstra search without early stopping.
// Whenever we settle a vertex u, we know the intervening opportunities between s and u, and
// we evaluate the formula for s and u.
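// The formula in question (see calculateDemand) appears to be the radiation
// model with selection parameter lambda: with P(1|x) = (1 - lambda^x) / x,
// m = srcPoi and s = intPoi intervening opportunities, a trip from src ends at
// a dst offering w = dstPoi opportunities with probability
// [P(1|m+s) - P(1|m+s+w)] / P(1|m),
// rescaled by normConst = 1 / (1 - P(1|totPoi) / P(1|srcPoi)) so that the
// probabilities over all reachable destinations sum to one.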
template <typename GraphT>
class FormulaDemandCalculator {
public:
// Constructs a travel demand calculator for the specified network.
explicit FormulaDemandCalculator(const GraphT& graph, const int seed, const bool verbose) noexcept
: graph(graph), totPop(0), totPoi(1), seed(seed), verbose(verbose) {
FORALL_VERTICES(graph, v) {
totPop += graph.population(v);
totPoi += graph.numOpportunities(v);
}
assert(totPop > 0);
assert(totPoi > 1);
assert(seed >= 0);
}
// Generates OD pairs and writes them to the specified file.
void calculateDemand(
int numODPairs, double lambda, double swapProb, const std::string& fileName) const {
Timer timer;
const auto maxNumSources = std::min(graph.numVertices(), DC_MAX_NUM_SOURCES);
if (verbose) std::cout << "Calculating demand: ";
ProgressBar bar(maxNumSources);
assert(lambda >= 0); assert(lambda < 1);
using Dij = Dijkstra<GraphT, TravelTimeAttribute, BasicLabelSet<0, ParentInfo::NO_PARENT_INFO>>;
#pragma omp parallel
{
Dij dijkstra(graph);
std::minstd_rand rand(seed + omp_get_thread_num() + 1);
std::bernoulli_distribution swapDist(swapProb);
std::ofstream out(fileName + ".part" + std::to_string(omp_get_thread_num()));
assert(out.good());
#pragma omp for schedule(static, 1) nowait
for (auto src = 0; src < maxNumSources; ++src) {
auto srcPop = graph.population(src); // The source population.
auto srcPoi = graph.numOpportunities(src) + 1; // The number of opportunities at the source.
auto intPoi = 0; // The number of intervening opportunities.
if (srcPop == 0)
continue;
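// Each source emits a binomial share of the numODPairs trips, proportional in
// expectation to its share of the total population.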
auto outflow = std::binomial_distribution<>(numODPairs, srcPop / totPop)(rand);
auto normConst = 1 / (1 -
(1 - std::pow(lambda, totPoi)) / (1 - std::pow(lambda, srcPoi)) * (srcPoi / totPoi));
auto n = (1 - std::pow(lambda, srcPoi)) / srcPoi;
dijkstra.init({{src}});
dijkstra.settleNextVertex();
while (!dijkstra.queue.empty()) {
auto dst = dijkstra.settleNextVertex();
auto dstPoi = graph.numOpportunities(dst);
if (dstPoi == 0)
continue;
// Evaluate the radiation model's formula for src and dst.
auto ns = (1 - std::pow(lambda, srcPoi + intPoi)) / (srcPoi + intPoi);
auto nns = (1 - std::pow(lambda, srcPoi + dstPoi + intPoi)) / (srcPoi + dstPoi + intPoi);
auto prob = (ns - nns) / n;
auto numTravelers = std::binomial_distribution<>(outflow, normConst * prob)(rand);
for (auto i = 0; i < numTravelers; ++i)
if (swapDist(rand))
out << dst << ',' << src << '\n';
else
out << src << ',' << dst << '\n';
intPoi += dstPoi;
}
++bar;
}
}
bar.finish();
if (verbose) std::cout << "done (" << timer.elapsed() << "ms)." << std::endl;
}
private:
const GraphT& graph; // The network we work on.
double totPop; // The total number of inhabitants living in the network.
double totPoi; // The total number of opportunities in the network.
const int seed; // The seed used to initialize the per-thread random number generators.
const bool verbose; // Should we display informative messages?
};
|
pp_collision.c | /* Copyright (C) 2017 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <phonoc_array.h>
#include <phonoc_const.h>
#include <phonoc_utils.h>
#include <phonon3_h/imag_self_energy_with_g.h>
#include <phonon3_h/pp_collision.h>
#include <phonon3_h/interaction.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <lapack_wrapper.h>
static void get_collision(double *ise,
const int num_band0,
const int num_band,
const int num_temps,
const double *temperatures,
const double *g,
const char *g_zero,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const int *triplets,
const int weight,
const int *grid_address,
const int *mesh,
const double *fc3,
const int is_compact_fc3,
const double *shortest_vectors,
const int svecs_dims[3],
const int *multiplicity,
const double *masses,
const int *p2s_map,
const int *s2p_map,
const int *band_indices,
const int symmetrize_fc3_q,
const double cutoff_frequency,
const int openmp_per_triplets);
static void finalize_ise(double *imag_self_energy,
const double *ise,
const int *grid_address,
const int *triplets,
const int num_triplets,
const int num_temps,
const int num_band0,
const int is_NU);
void ppc_get_pp_collision(double *imag_self_energy,
PHPYCONST int relative_grid_address[24][4][3], /* thm */
const double *frequencies,
const lapack_complex_double *eigenvectors,
const Iarray *triplets,
const int *weights,
const int *grid_address, /* thm */
const int *bz_map, /* thm */
const int *mesh, /* thm */
const double *fc3,
const int is_compact_fc3,
const double *shortest_vectors,
const int svecs_dims[3],
const int *multiplicity,
const double *masses,
const int *p2s_map,
const int *s2p_map,
const Iarray *band_indices,
const Darray *temperatures,
const int is_NU,
const int symmetrize_fc3_q,
const double cutoff_frequency)
{
int i, num_band, num_band0, num_band_prod, num_triplets, num_temps;
int openmp_per_triplets;
double *ise, *freqs_at_gp, *g;
char *g_zero;
int tp_relative_grid_address[2][24][4][3];
ise = NULL;
freqs_at_gp = NULL;
g = NULL;
g_zero = NULL;
num_band0 = band_indices->dims[0];
num_band = svecs_dims[1] * 3;
num_band_prod = num_band0 * num_band * num_band;
num_triplets = triplets->dims[0];
num_temps = temperatures->dims[0];
ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
for (i = 0; i < num_band0; i++) {
freqs_at_gp[i] = frequencies[triplets->data[0] * num_band +
band_indices->data[i]];
}
if (num_triplets > num_band) {
openmp_per_triplets = 1;
} else {
openmp_per_triplets = 0;
}
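/* Heuristic: when there are more triplets than bands, parallelize over
triplets and run each per-triplet kernel serially; otherwise keep the
triplet loop serial and give the threads to the inner band loops (hence
the 1 - openmp_per_triplets flags passed down below). */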
tpl_set_relative_grid_address(tp_relative_grid_address,
relative_grid_address);
#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
g_zero = (char*)malloc(sizeof(char) * num_band_prod);
tpi_get_integration_weight(g,
g_zero,
freqs_at_gp,
num_band0,
tp_relative_grid_address,
mesh,
triplets->data + i * 3,
1,
(int(*)[3])grid_address,
bz_map,
frequencies,
num_band,
2,
1 - openmp_per_triplets);
get_collision(ise + i * num_temps * num_band0,
num_band0,
num_band,
num_temps,
temperatures->data,
g,
g_zero,
frequencies,
eigenvectors,
triplets->data + i * 3,
weights[i],
grid_address,
mesh,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices->data,
symmetrize_fc3_q,
cutoff_frequency,
openmp_per_triplets);
free(g_zero);
g_zero = NULL;
free(g);
g = NULL;
}
finalize_ise(imag_self_energy,
ise,
grid_address,
triplets->data,
num_triplets,
num_temps,
num_band0,
is_NU);
free(freqs_at_gp);
freqs_at_gp = NULL;
free(ise);
ise = NULL;
}
void ppc_get_pp_collision_with_sigma(
double *imag_self_energy,
const double sigma,
const double sigma_cutoff,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const Iarray *triplets,
const int *weights,
const int *grid_address,
const int *mesh,
const double *fc3,
const int is_compact_fc3,
const double *shortest_vectors,
const int svecs_dims[3],
const int *multiplicity,
const double *masses,
const int *p2s_map,
const int *s2p_map,
const Iarray *band_indices,
const Darray *temperatures,
const int is_NU,
const int symmetrize_fc3_q,
const double cutoff_frequency)
{
int i, num_band, num_band0, num_band_prod, num_triplets, num_temps;
int openmp_per_triplets, const_adrs_shift;
double cutoff;
double *ise, *freqs_at_gp, *g;
char *g_zero;
ise = NULL;
freqs_at_gp = NULL;
g = NULL;
g_zero = NULL;
num_band0 = band_indices->dims[0];
num_band = svecs_dims[1] * 3;
num_band_prod = num_band0 * num_band * num_band;
num_triplets = triplets->dims[0];
num_temps = temperatures->dims[0];
const_adrs_shift = num_band_prod;
ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
for (i = 0; i < num_band0; i++) {
freqs_at_gp[i] = frequencies[triplets->data[0] * num_band +
band_indices->data[i]];
}
if (num_triplets > num_band) {
openmp_per_triplets = 1;
} else {
openmp_per_triplets = 0;
}
cutoff = sigma * sigma_cutoff;
#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
g_zero = (char*)malloc(sizeof(char) * num_band_prod);
tpi_get_integration_weight_with_sigma(g,
g_zero,
sigma,
cutoff,
freqs_at_gp,
num_band0,
triplets->data + i * 3,
const_adrs_shift,
frequencies,
num_band,
2,
0);
get_collision(ise + i * num_temps * num_band0,
num_band0,
num_band,
num_temps,
temperatures->data,
g,
g_zero,
frequencies,
eigenvectors,
triplets->data + i * 3,
weights[i],
grid_address,
mesh,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices->data,
symmetrize_fc3_q,
cutoff_frequency,
openmp_per_triplets);
free(g_zero);
g_zero = NULL;
free(g);
g = NULL;
}
finalize_ise(imag_self_energy,
ise,
grid_address,
triplets->data,
num_triplets,
num_temps,
num_band0,
is_NU);
free(freqs_at_gp);
freqs_at_gp = NULL;
free(ise);
ise = NULL;
}
static void get_collision(double *ise,
const int num_band0,
const int num_band,
const int num_temps,
const double *temperatures,
const double *g,
const char *g_zero,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const int *triplets,
const int weight,
const int *grid_address,
const int *mesh,
const double *fc3,
const int is_compact_fc3,
const double *shortest_vectors,
const int svecs_dims[3],
const int *multiplicity,
const double *masses,
const int *p2s_map,
const int *s2p_map,
const int *band_indices,
const int symmetrize_fc3_q,
const double cutoff_frequency,
const int openmp_per_triplets)
{
int i, num_band_prod, num_g_pos;
double *fc3_normal_squared;
int (*g_pos)[4];
fc3_normal_squared = NULL;
g_pos = NULL;
num_band_prod = num_band0 * num_band * num_band;
fc3_normal_squared = (double*)malloc(sizeof(double) * num_band_prod);
g_pos = (int(*)[4])malloc(sizeof(int[4]) * num_band_prod);
for (i = 0; i < num_band_prod; i++) {
fc3_normal_squared[i] = 0;
}
num_g_pos = ise_set_g_pos(g_pos,
num_band0,
num_band,
g_zero);
get_interaction_at_triplet(
fc3_normal_squared,
num_band0,
num_band,
g_pos,
num_g_pos,
frequencies,
eigenvectors,
triplets,
grid_address,
mesh,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices,
symmetrize_fc3_q,
cutoff_frequency,
0,
0,
1 - openmp_per_triplets);
ise_imag_self_energy_at_triplet(
ise,
num_band0,
num_band,
fc3_normal_squared,
frequencies,
triplets,
weight,
g,
g + num_band_prod,
g_pos,
num_g_pos,
temperatures,
num_temps,
cutoff_frequency,
1 - openmp_per_triplets);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
free(g_pos);
g_pos = NULL;
}
static void finalize_ise(double *imag_self_energy,
const double *ise,
const int *grid_address,
const int *triplets,
const int num_triplets,
const int num_temps,
const int num_band0,
const int is_NU)
{
int i, j, k, is_N;
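/* With is_NU set, contributions are split by tpl_is_N into normal (N)
processes, summed into the first num_temps * num_band0 block of
imag_self_energy, and umklapp (U) processes, summed into the second. */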
if (is_NU) {
for (i = 0; i < 2 * num_temps * num_band0; i++) {
imag_self_energy[i] = 0;
}
for (i = 0; i < num_triplets; i++) {
is_N = tpl_is_N(triplets + i * 3, grid_address);
for (j = 0; j < num_temps; j++) {
for (k = 0; k < num_band0; k++) {
if (is_N) {
imag_self_energy[j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
} else {
imag_self_energy[num_temps * num_band0 + j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
}
}
}
}
} else {
for (i = 0; i < num_temps * num_band0; i++) {
imag_self_energy[i] = 0;
}
for (i = 0; i < num_triplets; i++) {
for (j = 0; j < num_temps; j++) {
for (k = 0; k < num_band0; k++) {
imag_self_energy[j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
}
}
}
}
}
|
convolution_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
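// conv2x2s1_neon: 2x2 convolution with stride 1. Per input channel, each
// output pixel accumulates
// r0[0]*k[0] + r0[1]*k[1] + r1[0]*k[2] + r1[1]*k[3]
// (see the scalar fallback below); input channels are consumed two at a time
// so two 2x2 kernels stay resident in NEON registers.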
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
for (; q+1<inch; q+=2)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* kernel0 = kernel + p*inch*4 + q*4;
const float* kernel1 = kernel0 + 4;
const float* r00 = img0;
const float* r01 = img0 + w;
const float* r10 = img1;
const float* r11 = img1 + w;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(kernel0);
float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _r000 = vld1q_f32(r00);
float32x4_t _r010 = vld1q_f32(r01);
float32x4_t _r001 = vld1q_f32(r00 + 1);
float32x4_t _r011 = vld1q_f32(r01 + 1);
float32x4_t _r100 = vld1q_f32(r10);
float32x4_t _r110 = vld1q_f32(r11);
float32x4_t _r101 = vld1q_f32(r10 + 1);
float32x4_t _r111 = vld1q_f32(r11 + 1);
float32x4_t _sum = vld1q_f32(outptr);
_sum = vmlaq_lane_f32(_sum, _r000, vget_low_f32(_k0), 0);
_sum = vmlaq_lane_f32(_sum, _r001, vget_low_f32(_k0), 1);
_sum = vmlaq_lane_f32(_sum, _r010, vget_high_f32(_k0), 0);
_sum = vmlaq_lane_f32(_sum, _r011, vget_high_f32(_k0), 1);
_sum = vmlaq_lane_f32(_sum, _r100, vget_low_f32(_k1), 0);
_sum = vmlaq_lane_f32(_sum, _r101, vget_low_f32(_k1), 1);
_sum = vmlaq_lane_f32(_sum, _r110, vget_high_f32(_k1), 0);
_sum = vmlaq_lane_f32(_sum, _r111, vget_high_f32(_k1), 1);
vst1q_f32(outptr, _sum);
r00 += 4;
r01 += 4;
r10 += 4;
r11 += 4;
outptr += 4;
}
#else
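// armv7 path: each 128-bit row load is reused for two kernel taps by forming
// the x+1-shifted window with vext, instead of the second offset load used by
// the aarch64 intrinsics above.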
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3]! \n"
"pld [%4, #128] \n"
"vld1.f32 {d28-d29}, [%4]! \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d18-d19}, [%5] \n"// q9 = sum
"vmul.f32 q8, q0, %e12[0] \n"
"vmla.f32 q9, q2, %f12[0] \n"
"pld [%1, #128] \n"
"vld1.f32 {d2-d3}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d6-d7}, [%2]! \n"
"vext.f32 q10, q0, q1, #1 \n"
"vext.f32 q11, q2, q3, #1 \n"
"vmla.f32 q8, q12, %e13[0] \n"
"vmla.f32 q9, q14, %f13[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d26-d27}, [%3]! \n"
"pld [%4, #128] \n"
"vld1.f32 {d30-d31}, [%4]! \n"
"vmla.f32 q8, q10, %e12[1] \n"
"vmla.f32 q9, q11, %f12[1] \n"
"vext.f32 q10, q12, q13, #1 \n"
"vext.f32 q11, q14, q15, #1 \n"
"vmla.f32 q8, q10, %e13[1] \n"
"vmla.f32 q9, q11, %f13[1] \n"
"vorr q0, q1, q1 \n"
"vorr q2, q3, q3 \n"
"vadd.f32 q8, q8, q9 \n"
"vorr q12, q13, q13 \n"
"vorr q14, q15, q15 \n"
"subs %0, #1 \n"
"vst1.f32 {d16-d17}, [%5]! \n"
"bne 0b \n"
"sub %1, #16 \n"
"sub %2, #16 \n"
"sub %3, #16 \n"
"sub %4, #16 \n"
: "=r"(nn), // %0
"=r"(r00), // %1
"=r"(r01), // %2
"=r"(r10), // %3
"=r"(r11), // %4
"=r"(outptr) // %5
: "0"(nn),
"1"(r00),
"2"(r01),
"3"(r10),
"4"(r11),
"5"(outptr),
"w"(_k0), // %12
"w"(_k1) // %13
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x2_t _r00 = vld1_f32(r00);
float32x2_t _r01 = vld1_f32(r01);
float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
float32x2_t _r10 = vld1_f32(r10);
float32x2_t _r11 = vld1_f32(r11);
float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
_s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
_s = vpadd_f32(_s, _s);
*outptr += vget_lane_f32(_s, 0);
#else
float sum = 0.f;
sum += r00[0] * kernel0[0];
sum += r00[1] * kernel0[1];
sum += r01[0] * kernel0[2];
sum += r01[1] * kernel0[3];
sum += r10[0] * kernel1[0];
sum += r10[1] * kernel1[1];
sum += r11[0] * kernel1[2];
sum += r11[1] * kernel1[3];
*outptr += sum;
#endif // __ARM_NEON
r00 += 1;
r01 += 1;
r10 += 1;
r11 += 1;
outptr++;
}
r00 += 1;
r01 += 1;
r10 += 1;
r11 += 1;
}
}
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*4 + q*4;
const float* r0 = img0;
const float* r1 = img0 + w;
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r01 = vld1q_f32(r0 + 1);
float32x4_t _r11 = vld1q_f32(r1 + 1);
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _sum2;
_sum = vmlaq_f32(_sum, _r00, _k0);
_sum2 = vmulq_f32(_r01, _k1);
_sum = vmlaq_f32(_sum, _r10, _k2);
_sum2 = vmlaq_f32(_sum2, _r11, _k3);
_sum = vaddq_f32(_sum, _sum2);
vst1q_f32(outptr, _sum);
r0 += 4;
r1 += 4;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"0: \n"
"pld [%3, #128] \n"
"vld1.f32 {d18-d19}, [%3] \n"// q9 = sum
"vmul.f32 q8, q0, %q8 \n"
"vmla.f32 q9, q2, %q10 \n"
"pld [%1, #128] \n"
"vld1.f32 {d2-d3}, [%1]! \n"
"vext.f32 q10, q0, q1, #1 \n"
"vmla.f32 q8, q10, %q9 \n"
"pld [%2, #128] \n"
"vld1.f32 {d6-d7}, [%2]! \n"
"vext.f32 q11, q2, q3, #1 \n"
"vmla.f32 q9, q11, %q11 \n"
"vorr q0, q1, q1 \n"
"vadd.f32 q8, q8, q9 \n"
"vorr q2, q3, q3 \n"
"subs %0, #1 \n"
"vst1.f32 {d16-d17}, [%3]! \n"
"bne 0b \n"
"sub %1, #16 \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr),
"w"(_k0), // %8
"w"(_k1), // %9
"w"(_k2), // %10
"w"(_k3) // %11
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x2_t _r0 = vld1_f32(r0);
float32x2_t _r1 = vld1_f32(r1);
float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
_s = vpadd_f32(_s, _s);
*outptr += vget_lane_f32(_s, 0);
#else
float sum = 0.f;
sum += r0[0] * kernel0[0];
sum += r0[1] * kernel0[1];
sum += r1[0] * kernel0[2];
sum += r1[1] * kernel0[3];
*outptr += sum;
#endif
r0 += 1;
r1 += 1;
outptr++;
}
r0 += 1;
r1 += 1;
}
}
}
}
|
GB_binop__ge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp64)
// A*D function (colscale): GB (_AxD__ge_fp64)
// D*A function (rowscale): GB (_DxB__ge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp64)
// C=scalar+B GB (_bind1st__ge_fp64)
// C=scalar+B' GB (_bind1st_tran__ge_fp64)
// C=A+scalar GB (_bind2nd__ge_fp64)
// C=A'+scalar GB (_bind2nd_tran__ge_fp64)
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pjenccc.c | // Copyright (c) 2018 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/*
//
// Purpose:
//    Color conversion functions, forward transform
//
// Contents:
// mfxiRGBToY_JPEG_8u_C3C1R
//
*/
#include "precomp.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef __OWNJ_H__
#include "ownj.h"
#endif
#ifndef __PJENCCCTBL_H__
#include "pjenccctbl.h"
#endif
#ifndef __PJENCCC_H__
#include "pjenccc.h"
#endif
#if (_IPPXSC < _IPPXSC_S2)
#if !((IPPJ_ENCCC_OPT) || (_IPPLRB>=_IPPLRB_B1))
LOCFUN(void, mfxownpj_RGBToY_JPEG_8u_C3C1R, (
const Ipp8u* rgb,
Ipp8u* y,
int width))
{
int i;
for(i = 0; i < width; i++)
{
y[0] = (Ipp8u)((mfxcc_table[rgb[0] + R_Y_OFFS] +
mfxcc_table[rgb[1] + G_Y_OFFS] +
mfxcc_table[rgb[2] + B_Y_OFFS] + 3) >> 16);
rgb += 3;
y += 1;
}
return;
} /* mfxownpj_RGBToY_JPEG_8u_C3C1R() */
#endif
#endif
/* ---------------------- library functions definitions -------------------- */
/* ///////////////////////////////////////////////////////////////////////////
// Name:
// mfxiRGBToY_JPEG_8u_C3C1R
//
// Purpose:
//    RGB to Y color conversion
//
// Parameter:
// pSrc pointer to input data
// SrcStep line offset in input data
// pDst pointer to output array
// DstStep line offset in output data
// roiSize ROI size
//
// Returns:
// IppStatus
//
// Notes:
//    the color conversion equation:
// Y = 0.29900*R + 0.58700*G + 0.11400*B
*/
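/*
// Worked example (illustrative): a pure-red pixel (R,G,B) = (255, 0, 0) gives
// Y = 0.299*255 + 0.587*0 + 0.114*0 ~= 76. The kernel above computes this in
// fixed point: mfxcc_table holds the pre-scaled per-channel terms, which are
// summed and shifted right by 16 instead of using floating-point multiplies.
*/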
IPPFUN(IppStatus, mfxiRGBToY_JPEG_8u_C3C1R, (
const Ipp8u* pSrc,
int SrcStep,
Ipp8u* pDst,
int DstStep,
IppiSize roiSize))
{
int i;
IPP_BAD_ENC_CC_C3C1_RET()
#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
shared(pSrc,pDst,SrcStep,DstStep,roiSize) \
private(i) default(none) \
if((roiSize.height*roiSize.width) > (OMP_BOUNDARY))
#endif
for(i = 0; i < roiSize.height; i++)
{
const Ipp8u* rgb;
Ipp8u* y;
rgb = pSrc + i * SrcStep;
y = pDst + i * DstStep;
mfxownpj_RGBToY_JPEG_8u_C3C1R(rgb, y, roiSize.width);
}
return ippStsNoErr;
} /* mfxiRGBToY_JPEG_8u_C3C1R() */
|
link-list-alg-openmp3x.c | /*********************************************************************************
OpenMP-3.0 Example Codes Beta-v1.0
File : link-list-alg-openmp3x.c
Date : March 2010
Description : The program performs a linked-list traversal (irregular
              parallelism) in parallel, once with the OpenMP-3.0 task
              construct and once with an OpenMP-2.5 approach, and measures
              the time taken by both approaches.
              a) incrementListItem_Wtask (OpenMP-2.5) : When a thread
              encounters the parallel construct, it creates a team of
              threads. The single construct inside the parallel region
              restricts node processing to one thread at a time. This is
              unintuitive and inefficient because only one thread does work
              at any moment, and the single construct incurs a relatively
              high cost on every node.
              b) incrementList_Task (OpenMP-3.0) : This approach uses the
              OpenMP-3.0 task construct. Whenever a thread encounters a task
              construct, a new explicit task is generated. An explicit task
              may be executed by any thread in the current team, in parallel
              with other tasks, so several nodes can be processed
              concurrently.
OpenMP pragma/
Directive used : #pragma omp parallel
#pragma omp single
#pragma omp task
Input : - Number of Nodes in the link list
- Number of threads to be used
Output : Time taken by both approaches.
*********************************************************************************/
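/* Sketch of the task-based traversal idiom timed below (for reference only;
   assumes a process() routine that touches a single node):

       #pragma omp parallel
       #pragma omp single
       for (node *p = head; p != NULL; p = p->link)
       #pragma omp task firstprivate(p)
           process(p);

   Each task captures its own copy of p (firstprivate), so every task works
   on the node that was current when the task was created. */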
/* Header file inclusion */
#include<stdio.h>
#include<stdlib.h>
#include <omp.h>
/* Global variable declaration */
long int totalNodes;
int numThreads;
typedef struct node node;
/* Structure for link list node */
struct node
{
int data;
struct node *link;
};
/* Function declaration to perform operation on Link List */
node *createList(node ** );
void incrementList_Task(node *);
void incrementListItem_Wtask(node *);
void processList(node *);
//void traverseList(node *);
/* Main Function */
int main(int argc,char** argv)
{
node *start;
start = NULL;
/* Checking for command line arguments */
if( argc != 3 ){
printf("\t\t Very Few Arguments\n ");
printf("\t\t Syntax : exec <total-nodes> <No. of Threads>\n");
exit(-1);
}
/* Initializing the number of nodes in the list and
   the number of threads */
totalNodes =atol(argv[1]);
numThreads =atoi(argv[2]);
if(totalNodes<=0)
{
printf("\n\t Error : Number of nodes should be greater then 0\n");
exit(-1);
}
/* Call the function that creates the linked list */
start=createList(&start);
printf("\n\t\t Total Nodes in the List : %ld ",totalNodes);
printf("\n\t\t Number of threads : %d ",numThreads);
/* Check for the Empty link list condition */
if ( start != NULL){
//traverseList(start);
/* Call the function that processes the list using the task construct */
incrementList_Task(start);
/* Call the function that processes the list using the parallel/single construct */
incrementListItem_Wtask(start);
//traverseList(start);
}
else
printf("\n List is Empty \n");
return 0;
} /* End of main */
/* Description : Function to create the linked list
   @param [**q] : Start pointer to the linked list
@return : Start pointer to the list
*/
struct node *createList(node **q)
{
node *temp,*r,*head;
temp = *q;
int count=1;
while(count <= totalNodes ) {
/* If the node is the first node of the list */
if(*q==NULL)
{
/* Create the node */
if((temp = (node *)malloc(sizeof(struct node)))==NULL){
perror("\n\t Memory allocation for newnode ");
printf(" \n\t Creating the singly Link List.................Failed. \n");
exit(-1);
}
temp->data= rand();
temp->link=NULL;
*q=temp;
head=temp;
}
else
{
temp = *q;
while(temp->link !=NULL)
{
temp=temp->link;
}
/* Create the node */
if((r = (node *)malloc(sizeof(struct node)))==NULL){
perror("\n\t Memory allocation for newnode ");
printf(" \n\t Creating the singly Link List.................Failed. \n");
exit(-1);
}
r->data=rand() ;
r->link=NULL;
temp->link=r;
}
count++;
}
return head ;
} /* End of Create List function */
/* Description : Function to increment the list items using the TASK CONSTRUCT (OpenMP-3.0)
                 Whenever a thread encounters a task construct, a new explicit
                 task is generated. An explicit task may be executed by any
                 thread in the current team, in parallel with other tasks, so
                 several nodes can be processed concurrently.
   @param [head] : start pointer to the linked list
   @return       : None
*/
void incrementList_Task(node *head)
{
double start_time,end_time;
/* Set the number of threads in parallel region */
omp_set_num_threads(numThreads);
/* Get the start time */
start_time=omp_get_wtime();
/* Create the parallel region */
#pragma omp parallel
{
#pragma omp single /* Only one thread walks the list and creates the tasks */
{
node * q = head;
while (q) {
#pragma omp task /* Create the task */
processList(q);
q = q->link;
}
}
} /* end of the parallel region */
end_time=omp_get_wtime();
printf("\n\t\t Time taken ( Task Construct : openMP-3.0) : %lf sec", (end_time -start_time));
} /* End of the function */
/* Description : Function to increment the list items WITHOUT the TASK CONSTRUCT (OpenMP-2.5)
                 When a thread encounters the parallel construct, it creates a
                 team of threads. The single construct inside the parallel
                 region restricts node processing to one thread at a time.
                 This is unintuitive and inefficient because only one thread
                 does work at any moment, and the single construct incurs a
                 relatively high cost on every node.
   @param [head] : start pointer to the linked list
   @return       : None
*/
void incrementListItem_Wtask(node *head)
{
double start_time,end_time;
/* Set the number of threads */
omp_set_num_threads(numThreads);
/* get the start time */
start_time=omp_get_wtime();
/* Create the team of thread */
#pragma omp parallel
{
node * q = head;
while (q) {
#pragma omp single /* Only one thread processes this node */
processList(q);
q = q->link;
}
}
end_time=omp_get_wtime();
printf("\n\t\t Time taken ( Parallel / Single Directive : OpenMP-2.5) : %lf sec\n", (end_time -start_time));
}/* End of the function */
/*
Description : Function to increment the node item.
@param [q] : pointer to the current node
@return : None
*/
void processList(struct node *q)
{
//printf("\n\t My thread ID %d\n", omp_get_thread_num());
q->data = (q->data + q->data);
}
/* Function to traverse the list */
/*void traverseList(node *q)
{
if(q==NULL)
{
printf("\n\nEmpty Link List.Can't Display The Data");
exit(-1);
}
printf("\t");
while(q!=NULL)
{
printf(" %d--->",q->data);
q=q->link;
}
}*/
|
util.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file internal/util.h
* \brief Internal utility functions
*/
#ifndef INTERNAL_UTIL_H_
#define INTERNAL_UTIL_H_
namespace qpp {
/**
* \namespace qpp::internal
* \brief Internal utility functions, do not use them directly or modify them
*/
namespace internal {
// integer index to multi-index, use C-style array for speed
// standard lexicographical order, e.g. 00, 01, 10, 11
inline void n2multiidx(idx n, idx numdims, const idx* const dims,
idx* result) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
if (numdims > 0) // numdims equal zero is a no-op
{
idx D = 1;
for (idx i = 0; i < numdims; ++i)
D *= dims[i];
assert(n < D);
}
#endif
// no error checks in release version to improve speed
for (idx i = 0; i < numdims; ++i) {
result[numdims - i - 1] = n % (dims[numdims - i - 1]);
n /= (dims[numdims - i - 1]);
}
}
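// Example: n = 6 with numdims = 3 and dims = {2, 2, 2} yields
// result = {1, 1, 0}, since 6 = 1*4 + 1*2 + 0*1 in lexicographical order.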
// silence g++4.9 bogus warning -Warray-bounds and -Wmaybe-uninitialized
// in qpp::internal::multiidx2n()
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
// multi-index to integer index, use C-style array for speed,
// standard lexicographical order, e.g. 00->0, 01->1, 10->2, 11->3
inline idx multiidx2n(const idx* const midx, idx numdims,
const idx* const dims) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(numdims > 0);
#endif
// no error checks in release version to improve speed
// Static allocation for speed!
// double the size for matrices reshaped as vectors
idx part_prod[2 * maxn];
idx result = 0;
part_prod[numdims - 1] = 1;
for (idx i = 1; i < numdims; ++i) {
part_prod[numdims - i - 1] = part_prod[numdims - i] * dims[numdims - i];
result += midx[numdims - i - 1] * part_prod[numdims - i - 1];
}
return result + midx[numdims - 1];
}
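// Example: midx = {1, 1, 0} with numdims = 3 and dims = {2, 2, 2} maps back
// to 1*4 + 1*2 + 0*1 = 6, i.e., the inverse of n2multiidx() above.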
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic pop
#endif
// check square matrix
template <typename Derived>
bool check_square_mat(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == A.cols();
}
// check whether input is a vector or not
template <typename Derived>
bool check_vector(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == 1 || A.cols() == 1;
}
// check whether input is a row vector or not
template <typename Derived>
bool check_rvector(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == 1;
}
// check whether input is a column vector or not
template <typename Derived>
bool check_cvector(const Eigen::MatrixBase<Derived>& A) {
return A.cols() == 1;
}
// check non-zero size of object that supports size() function
template <typename T>
bool check_nonzero_size(const T& x) noexcept {
return x.size() != 0;
}
// check that all sizes match
template <typename T1, typename T2>
bool check_matching_sizes(const T1& lhs, const T2& rhs) noexcept {
return lhs.size() == rhs.size();
}
// check that dims is a valid dimension vector
inline bool check_dims(const std::vector<idx>& dims) {
if (dims.size() == 0)
return false;
    return std::find_if(std::begin(dims), std::end(dims),
                        [](idx i) -> bool { return i == 0; }) ==
           std::end(dims);
}
// check that valid dims match the dimensions
// of valid (non-zero sized) square matrix
template <typename Derived>
bool check_dims_match_mat(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.rows() == A.cols());
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid column vector
template <typename Derived>
bool check_dims_match_cvect(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.rows() > 0);
assert(A.cols() == 1);
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid row vector
template <typename Derived>
bool check_dims_match_rvect(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.cols() > 0);
assert(A.rows() == 1);
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.cols());
}
// check that all elements in valid dims equal to dim
inline bool check_eq_dims(const std::vector<idx>& dims, idx dim) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
#endif
for (idx i : dims)
if (i != dim)
return false;
return true;
}
// check that vector has no duplicates
inline bool check_no_duplicates(std::vector<idx> v) {
std::sort(std::begin(v), std::end(v));
if (std::unique(std::begin(v), std::end(v)) != std::end(v))
return false;
else
return true;
}
// check that subsys is valid with respect to valid dims
inline bool check_subsys_match_dims(const std::vector<idx>& subsys,
const std::vector<idx>& dims) {
// subsys can be empty
// check valid number of subsystems
if (subsys.size() > dims.size())
return false;
// check no duplicates
if (!check_no_duplicates(subsys))
return false;
// check range of subsystems
return std::find_if(std::begin(subsys), std::end(subsys),
[dims](idx i) -> bool {
return i + 1 > dims.size();
}) == std::end(subsys);
}
// check matrix is 2 x 2
template <typename Derived>
bool check_qubit_matrix(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 2 && A.cols() == 2;
}
// check column vector is 2 x 1
template <typename Derived>
bool check_qubit_cvector(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 2 && A.cols() == 1;
}
// check row vector is 1 x 2
template <typename Derived>
bool check_qubit_rvector(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 1 && A.cols() == 2;
}
// check row vector is 1 x 2 or 2 x 1
template <typename Derived>
bool check_qubit_vector(const Eigen::MatrixBase<Derived>& A) noexcept {
return (A.rows() == 1 && A.cols() == 2) || (A.rows() == 2 && A.cols() == 1);
}
// check valid permutation
inline bool check_perm(const std::vector<idx>& perm) {
if (perm.size() == 0)
return false;
std::vector<idx> ordered(perm.size());
std::iota(std::begin(ordered), std::end(ordered), 0);
return std::is_permutation(std::begin(ordered), std::end(ordered),
std::begin(perm));
}
// Kronecker product of 2 matrices, preserve return type
// internal function for the variadic template function wrapper kron()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> kron2(const Eigen::MatrixBase<Derived1>& A,
const Eigen::MatrixBase<Derived2>& B) {
const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
// EXCEPTION CHECKS
// check types
if (!std::is_same<typename Derived1::Scalar,
typename Derived2::Scalar>::value)
throw exception::TypeMismatch("qpp::kron()");
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::kron()");
// check zero-size
if (!internal::check_nonzero_size(rB))
throw exception::ZeroSize("qpp::kron()");
// END EXCEPTION CHECKS
idx Acols = static_cast<idx>(rA.cols());
idx Arows = static_cast<idx>(rA.rows());
idx Bcols = static_cast<idx>(rB.cols());
idx Brows = static_cast<idx>(rB.rows());
dyn_mat<typename Derived1::Scalar> result;
result.resize(Arows * Brows, Acols * Bcols);
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
// column major order for speed
for (idx j = 0; j < Acols; ++j)
for (idx i = 0; i < Arows; ++i)
result.block(i * Brows, j * Bcols, Brows, Bcols) = rA(i, j) * rB;
return result;
}
// Direct sum of 2 matrices, preserve return type
// internal function for the variadic template function wrapper dirsum()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
dirsum2(const Eigen::MatrixBase<Derived1>& A,
const Eigen::MatrixBase<Derived2>& B) {
const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
// EXCEPTION CHECKS
// check types
if (!std::is_same<typename Derived1::Scalar,
typename Derived2::Scalar>::value)
throw exception::TypeMismatch("qpp::dirsum()");
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::dirsum()");
// check zero-size
if (!internal::check_nonzero_size(rB))
throw exception::ZeroSize("qpp::dirsum()");
// END EXCEPTION CHECKS
idx Acols = static_cast<idx>(rA.cols());
idx Arows = static_cast<idx>(rA.rows());
idx Bcols = static_cast<idx>(rB.cols());
idx Brows = static_cast<idx>(rB.rows());
dyn_mat<typename Derived1::Scalar> result =
dyn_mat<typename Derived1::Scalar>::Zero(Arows + Brows, Acols + Bcols);
result.block(0, 0, Arows, Acols) = rA;
result.block(Arows, Acols, Brows, Bcols) = rB;
return result;
}
// may be useful, extracts variadic template argument pack into a std::vector
template <typename T>
// ends the recursion
void variadic_vector_emplace(std::vector<T>&) {}
// may be useful, extracts variadic template argument pack into a std::vector
template <typename T, typename First, typename... Args>
void variadic_vector_emplace(std::vector<T>& v, First&& first, Args&&... args) {
v.emplace_back(std::forward<First>(first));
variadic_vector_emplace(v, std::forward<Args>(args)...);
}
// returns the number of subsystems (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size D
inline idx get_num_subsys(idx D, idx d) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(D > 0);
assert(d > 1);
#endif
return static_cast<idx>(std::llround(std::log2(D) / std::log2(d)));
}
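// Example: a state of size D = 8 built from qubits (d = 2) consists of
// log2(8)/log2(2) = 3 subsystems.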
// returns the dimension of a subsystem (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size sz consisting
// of N subsystems
inline idx get_dim_subsys(idx sz, idx N) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(N > 0);
assert(sz > 0);
#endif
if (N == 2)
return static_cast<idx>(std::llround(std::sqrt(sz)));
return static_cast<idx>(std::llround(std::pow(sz, 1. / N)));
}
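// Example: a state of size sz = 64 consisting of N = 3 equal-dimension
// subsystems has subsystem dimension 64^(1/3) = 4.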
// implementation details for pretty formatting
struct Display_Impl_ {
template <typename T>
// T must support rows(), cols(), operator()(idx, idx) const
std::ostream& display_impl_(const T& A, std::ostream& os,
double chop = qpp::chop) const {
std::ostringstream ostr;
ostr.copyfmt(os); // copy os' state
std::vector<std::string> vstr;
std::string strA;
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
for (idx j = 0; j < static_cast<idx>(A.cols()); ++j) {
strA.clear(); // clear the temporary string
ostr.clear();
ostr.str(std::string{}); // clear the ostringstream
// convert to complex
double re = static_cast<cplx>(A(i, j)).real();
double im = static_cast<cplx>(A(i, j)).imag();
if (std::abs(re) < chop && std::abs(im) < chop) {
ostr << "0 "; // otherwise segfault on destruction
// if using only vstr.emplace_back("0 ");
// bug in MATLAB libmx
vstr.emplace_back(ostr.str());
} else if (std::abs(re) < chop) {
ostr << im;
vstr.emplace_back(ostr.str() + "i");
} else if (std::abs(im) < chop) {
ostr << re;
vstr.emplace_back(ostr.str() + " ");
} else {
ostr << re;
strA = ostr.str();
strA += (im > 0 ? " + " : " - ");
ostr.clear();
ostr.str(std::string()); // clear
ostr << std::abs(im);
strA += ostr.str();
strA += "i";
vstr.emplace_back(strA);
}
}
}
// determine the maximum length of the entries in each column
std::vector<idx> maxlengthcols(A.cols(), 0);
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i)
for (idx j = 0; j < static_cast<idx>(A.cols()); ++j)
if (vstr[i * A.cols() + j].size() > maxlengthcols[j])
maxlengthcols[j] = vstr[i * A.cols() + j].size();
// finally display it!
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
os << std::setw(static_cast<int>(maxlengthcols[0])) << std::right
<< vstr[i * A.cols()]; // display first column
// then the rest
for (idx j = 1; j < static_cast<idx>(A.cols()); ++j)
os << std::setw(static_cast<int>(maxlengthcols[j] + 2))
<< std::right << vstr[i * A.cols() + j];
if (i < static_cast<idx>(A.rows()) - 1)
os << std::endl;
}
return os;
}
};
} /* namespace internal */
} /* namespace qpp */
#endif /* INTERNAL_UTIL_H_ */
|
parallel.c | //OpenMP version. Edit and submit only this file.
/* Enter your details below
* Name :
* UCLA ID :
* Email :
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "utils.h"
double work_it_par(long *old, long *new, long *super, long *simple, long *fibonacci) {
int i, j, k, kk, jj, ii;
int u, v, w, u1;
int ton = 0;
long compute_it, i1, moving_average = 0; /* reduction variables must start initialized */
double pi, pi2, x, y, sum = 0.0, step = 0.0;
long dot_product=0;
long nCirc=0;
long aggregate = 1;
double r=1.0;
int was_smart = 16;
long temp = 0;
int iDim, jDim, iuDim, jDimPlus, jDimPlusPlus, simplified, uDim;
long wntf = we_need_the_func();
long gtf = gimmie_the_func();
int dimSquare = DIM*DIM;
int b = 10;
#pragma omp parallel for private(i)
for(i=0; i<DIM-1;i++)
{
super[i] += simple[i];
}
#pragma omp parallel for private(i) reduction(+:dot_product,moving_average)
for(i=0; i<DIM-1;i++)
{
dot_product += super[i]*simple[i];
moving_average = 0;
for(ton=i;ton<DIM-1-WINDOW_SIZE;ton++)
{
moving_average += simple[ton];
}
}
int a_secret = 5;
fibonacci[0] = 1;
fibonacci[1] = 1;
for(i=2; i<DIM-1;i++)
{
fibonacci[i]=fibonacci[i-1]+fibonacci[i-2];
if(i==3)
{
printf("\n A secret is: %d",obfuscate_obfuscate_obfuscate(a_secret));
}
}
step = 1.0 / NUM_STEPS;
#pragma omp parallel for private(x) reduction(+:sum)
for (i=0;i<NUM_STEPS; i++)
{
x = (i+0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
pi = step * sum;
printf("\n %d trials, Riemann flavored pi is %f \n",NUM_STEPS, pi);
#pragma omp parallel for private(x, y) reduction(+:nCirc)
for(i=0;i<NUM_TRIALS; i++)
{
x = (random()%10000000)/10000000.0;
y = (random()%10000000)/10000000.0;
if (( x*x + y*y) <= r*r)
{
nCirc++;
}
}
pi2 = 4.0 * ((double)nCirc/(double)NUM_TRIALS);
printf("\n %d trials, Monte-Carlo flavored pi is %f \n",NUM_TRIALS, pi2);
#pragma omp parallel for num_threads(16) private (j, k, compute_it, iDim, jDim, iuDim, u, v, w, simplified, uDim, kk, jj, ii) reduction(+:aggregate, temp)
for (i=1; i<DIM-1-b; i+=b) {
for (j=1; j<DIM-1-b; j+=b) {
for (k=1; k<DIM-1-b; k+=b) {
for (ii = i; ii < i+b; ii++){
iDim = ii*dimSquare;
for (jj = j; jj < j+b; jj++){
jDim = jj*DIM;
for (kk = k; kk < k+b; kk++){
simplified = iDim + jDim + kk;
compute_it = old[simplified] * wntf;
aggregate += compute_it / gtf;
temp = 0;
for (u=-1; u<=1; u++) {
uDim = u*dimSquare;
temp += old[simplified-DIM-1+uDim];
temp += old[simplified-DIM+uDim];
temp += old[simplified-DIM+1+uDim];
temp += old[simplified-1+uDim];
temp += old[simplified+uDim];
temp += old[simplified+1+uDim];
temp += old[simplified+DIM-1+uDim];
temp += old[simplified+DIM+uDim];
temp += old[simplified+DIM+1+uDim];
}
temp/=27;
new[simplified] = temp;
}
}
}
}
}
}
printf("AGGR:%ld\n",aggregate);
for (i=1; i<DIM-1; i++) {
iDim = i*dimSquare;
for (j=1; j<DIM-1; j+=3) {
jDim = j*DIM;
jDimPlus = (j+1)*DIM;
jDimPlusPlus = (j+2)*DIM;
for (k=1; k<DIM-1; k+=3) {
u= (new[iDim+jDim+k] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDim+k+1] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDim+k+2] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k+1]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k+2]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k+1]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k+2]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
}
}
}
/* for (i=1; i<DIM-1; i++) {
for (j=1; j<DIM-1; j++) {
for (k=1; k<DIM-1; k++) {
new[i*DIM*DIM+j*DIM+k]=0;
for (u=-1; u<=1; u++) {
for (v=-1; v<=1; v++) {
for (w=-1; w<=1; w++) {
new[i*DIM*DIM+j*DIM+k]+=old[(i+u)*DIM*DIM+(j+v)*DIM+(k+w)];
}
}
}
new[i*DIM*DIM+j*DIM+k]/=27;
}
}
}
for (i=1; i<DIM-1; i++) {
for (j=1; j<DIM-1; j++) {
for (k=1; k<DIM-1; k++) {
u=(new[i*DIM*DIM+j*DIM+k]/100);
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
}
}
} */
return (double) (dot_product+moving_average+pi+pi2);
}
|
convolution_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_transform_kernel_pack4_bf16s_neon(const Mat& weight_data, Mat& weight_data_bf16, int num_input, int num_output, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = 4b-4a-kw-kh-inch/4a-outch/4b
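// Illustrative reading of the create() call below: with kernel_w = kernel_h = 3
// and num_input = num_output = 8, the packed blob has w = 9, h = 8/4 = 2,
// c = 8/4 = 2, and each (k, p) cell holds a 4x4 block of bf16 weights
// (2 bytes * 16 = 32 bytes per element, elempack 16).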
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_bf16.create(maxk, num_input / 4, num_output / 4, (size_t)2 * 16, 16);
for (int q = 0; q + 3 < num_output; q += 4)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
Mat g0 = weight_data_bf16.channel(q / 4);
for (int p = 0; p + 3 < num_input; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
unsigned short* g00 = g0.row<unsigned short>(p / 4);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k01[k]);
g00[5] = float32_to_bfloat16(k11[k]);
g00[6] = float32_to_bfloat16(k21[k]);
g00[7] = float32_to_bfloat16(k31[k]);
g00[8] = float32_to_bfloat16(k02[k]);
g00[9] = float32_to_bfloat16(k12[k]);
g00[10] = float32_to_bfloat16(k22[k]);
g00[11] = float32_to_bfloat16(k32[k]);
g00[12] = float32_to_bfloat16(k03[k]);
g00[13] = float32_to_bfloat16(k13[k]);
g00[14] = float32_to_bfloat16(k23[k]);
g00[15] = float32_to_bfloat16(k33[k]);
g00 += 16;
}
}
}
}
static void convolution_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
unsigned short* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float32x4_t _sum = vdupq_n_f32(0.f);
if (bias_data_ptr)
{
_sum = vld1q_f32(bias_data_ptr + p * 4);
}
const unsigned short* kptr = weight_data_bf16.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const unsigned short* sptr = m.row<const unsigned short>(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
float32x4_t _val = vcvt_f32_bf16(vld1_u16(sptr + space_ofs[k] * 4));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr));
float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr + 4));
float32x4_t _w2 = vcvt_f32_bf16(vld1_u16(kptr + 8));
float32x4_t _w3 = vcvt_f32_bf16(vld1_u16(kptr + 12));
#if __aarch64__
_sum = vmlaq_laneq_f32(_sum, _w0, _val, 0);
_sum = vmlaq_laneq_f32(_sum, _w1, _val, 1);
_sum = vmlaq_laneq_f32(_sum, _w2, _val, 2);
_sum = vmlaq_laneq_f32(_sum, _w3, _val, 3);
#else
_sum = vmlaq_lane_f32(_sum, _w0, vget_low_f32(_val), 0);
_sum = vmlaq_lane_f32(_sum, _w1, vget_low_f32(_val), 1);
_sum = vmlaq_lane_f32(_sum, _w2, vget_high_f32(_val), 0);
_sum = vmlaq_lane_f32(_sum, _w3, vget_high_f32(_val), 1);
#endif
kptr += 16;
}
}
_sum = activation_ps(_sum, activation_type, activation_params);
vst1_u16(outptr + j * 4, vcvt_bf16_f32(_sum));
}
outptr += outw * 4;
}
}
}
|
GB_binop__ge_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint32)
// A*D function (colscale): GB (_AxD__ge_uint32)
// D*A function (rowscale): GB (_DxB__ge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint32)
// C=scalar+B GB (_bind1st__ge_uint32)
// C=scalar+B' GB (_bind1st_tran__ge_uint32)
// C=A+scalar GB (_bind2nd__ge_uint32)
// C=A'+scalar GB (_bind2nd_tran__ge_uint32)
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_doacross.c | // RUN: %libomp-compile-and-run
// XFAIL: gcc-4, gcc-5, clang-3.7, clang-3.8, icc-15, icc-16
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#ifndef N
#define N 750
#endif
int test_doacross() {
int i, j;
// Allocate and zero out the matrix
int *m = (int *)malloc(sizeof(int) * N * N);
for (i = 0; i < N; ++i) {
for (j = 0; j < N; ++j) {
m[i * N + j] = 0;
}
}
// Have first row and column be 0, 1, 2, 3, etc.
for (i = 0; i < N; ++i)
m[i * N] = i;
for (j = 0; j < N; ++j)
m[j] = j;
// Perform wavefront which results in matrix:
// 0 1 2 3 4
// 1 2 3 4 5
// 2 3 4 5 6
// 3 4 5 6 7
// 4 5 6 7 8
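// With these boundary values the recurrence yields m[row][col] = row + col,
// so the bottom-right element should equal 2 * (N - 1); that is exactly what
// the check after the parallel region verifies.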
#pragma omp parallel shared(m)
{
int row, col;
#pragma omp for ordered(2)
for (row = 1; row < N; ++row) {
for (col = 1; col < N; ++col) {
#pragma omp ordered depend(sink : row - 1, col) depend(sink : row, col - 1)
m[row * N + col] = m[(row - 1) * N + col] + m[row * N + (col - 1)] -
m[(row - 1) * N + (col - 1)];
#pragma omp ordered depend(source)
}
}
}
// Check the bottom right element to see if iteration dependencies were held
int retval = (m[(N - 1) * N + N - 1] == 2 * (N - 1));
free(m);
return retval;
}
int main(int argc, char **argv) {
int i;
int num_failed = 0;
for (i = 0; i < REPETITIONS; i++) {
if (!test_doacross()) {
num_failed++;
}
}
return num_failed;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 4) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
} else {
    // all four sizes are required; otherwise they would be used uninitialized
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
}
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
for (t4=max(max(ceild(t1-126,128),ceild(8*t2-Nz-499,512)),ceild(24*t3-Ny-499,512));t4<=min(min(floord(4*Nt+Nx-9,512),floord(4*t1+Nx-1,512)),floord(24*t3+Nx+11,512));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),128*t4+126);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
mkl_quantized_conv_ops.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest());
int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest());
// Adjust the quantized range to be symmetric;
// e.g. for 8-bit use [-127, 127] as opposed to [-128, 127].
if (lowest < -highest) ++lowest;
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
float min_b, float max_b,
float* min_c, float* max_c) {
const float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
const float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
const float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
*min_c = c_float_for_one_quant_level * c_lowest;
*max_c = c_float_for_one_quant_level * c_highest;
}
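// Worked example (illustrative sketch, not part of the original header): for
// a qint8 x qint8 -> qint32 product with input ranges [-1.0f, 1.0f] and
// [-2.0f, 2.0f]:
//   a_level = (1 - (-1)) / (127 - (-127)) = 2 / 254 ~= 7.874e-3
//   b_level = (2 - (-2)) / (127 - (-127)) = 4 / 254 ~= 1.575e-2
//   c_level = a_level * b_level ~= 1.240e-4
//   *min_c  = c_level * lowest<qint32>()  ~= -2.663e5
//   *max_c  = c_level * highest<qint32>() ~=  2.663e5
// (the qint8 lowest of -128 is bumped to -127 by MklFloatForOneQuantizedLevel
// to keep the range symmetric; the qint32 bounds are used unadjusted).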
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
const Tensor& min_b_vector,
const Tensor& max_b_vector,
Tensor** min_c_vector,
Tensor** max_c_vector) {
DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
size_t n_channel = min_b_vector.NumElements();
const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
const float* min_b = min_b_vector.flat<float>().data();
const float* max_b = max_b_vector.flat<float>().data();
float* min_c = (*min_c_vector)->flat<float>().data();
float* max_c = (*max_c_vector)->flat<float>().data();
// This factor depends only on min_a/max_a, so hoist it out of the loop.
const float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
#pragma omp parallel for
for (size_t n = 0; n < n_channel; ++n) {
float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
min_c[n] = c_float_for_one_quant_level * c_lowest;
max_c[n] = c_float_for_one_quant_level * c_highest;
}
}
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
|
tinyexr.h | /*
Copyright (c) 2014 - 2018, Syoyo Fujita
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // assume stdint.h is available (C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use the embedded miniz to decode ZIP-compressed pixel data. Linking with
// zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes such as `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it (only valid for HALF pixel type
// channels)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assumes the EXR image contains A (single
// channel alpha) or RGB(A) channels.
// Application must free the image data returned in `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns a negative value and may set an error string in `err` when there's
// an error.
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
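// A minimal usage sketch (illustrative, not part of the original header;
// assumes an RGBA EXR file named "input.exr"):
#if 0
float *rgba = NULL;
int width, height;
const char *err = NULL;
int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
if (ret != TINYEXR_SUCCESS) {
if (err) fprintf(stderr, "EXR load error: %s\n", err);
} else {
// rgba holds width * height RGBA float pixels; release it with free().
free(rgba);
}
#endif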
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assumes the EXR image contains RGB(A)
// channels.
// `components` must be 1 (Grayscale), 3 (RGB) or 4 (RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width
// x height`
// Saves the image in fp16 (HALF) format when `save_as_fp16` is a positive
// non-zero value, and in fp32 (FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// The application must set up `EXRHeader` via `ParseEXRHeaderFromFile`
// before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
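// End-to-end sketch of the header-based loading path (illustrative, not part
// of the original header; error handling elided):
#if 0
EXRVersion exr_version;
EXRHeader exr_header;
EXRImage exr_image;
const char *err = NULL;
ParseEXRVersionFromFile(&exr_version, "input.exr");
InitEXRHeader(&exr_header);
ParseEXRHeaderFromFile(&exr_header, &exr_version, "input.exr", &err);
InitEXRImage(&exr_image);
LoadEXRImageFromFile(&exr_image, &exr_header, "input.exr", &err);
// ... use exr_image.images (scanline) or exr_image.tiles (tiled) ...
FreeEXRImage(&exr_image);
FreeEXRHeader(&exr_header);
#endif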
// Loads single-part OpenEXR image from memory.
// The application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// The application must set up the `EXRHeader*` array via
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the `EXRHeader.compression_type` value.
// Returns the number of bytes written on success.
// Returns negative value and may set error string in `err` when there's an
// error
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free the memory of DeepImage members (image, offset_table).
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free the memory of DeepImage members (image, offset_table).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>
#include <limits>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause file-locate operations to not
find files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing-related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux.
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
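// Streaming compression sketch (illustrative; `in_buf`/`in_len` are assumed
// caller-provided input, and `out_buf`/`out_buf_len` sized via
// mz_deflateBound()):
#if 0
mz_stream stream;
memset(&stream, 0, sizeof(stream));
stream.next_in = in_buf;
stream.avail_in = (unsigned int)in_len;
stream.next_out = out_buf;
stream.avail_out = (unsigned int)out_buf_len;
if (mz_deflateInit(&stream, MZ_DEFAULT_LEVEL) != MZ_OK) { /* handle error */ }
// With MZ_FINISH and large-enough buffers the whole input compresses in one
// call.
if (mz_deflate(&stream, MZ_FINISH) != MZ_STREAM_END) { /* handle error */ }
// stream.total_out now holds the compressed size.
mz_deflateEnd(&stream);
#endif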
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
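// Single-call round-trip sketch (illustrative; `src`, `src_len`, and `dst`
// are assumed caller-provided):
#if 0
mz_ulong comp_len = mz_compressBound((mz_ulong)src_len);
unsigned char *comp = (unsigned char *)malloc(comp_len);
if (comp && mz_compress(comp, &comp_len, src, (mz_ulong)src_len) == MZ_OK) {
mz_ulong uncomp_len = (mz_ulong)src_len; // caller must know (or bound) this
if (mz_uncompress(dst, &uncomp_len, comp, comp_len) == MZ_OK) {
// dst now holds uncomp_len bytes of the original data.
}
}
free(comp);
#endif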
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
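// Reader usage sketch (illustrative; note that this build defines
// MINIZ_NO_ARCHIVE_APIS above, so the archive APIs are compiled out here):
#if 0
mz_zip_archive zip;
memset(&zip, 0, sizeof(zip));
if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
for (mz_uint i = 0; i < mz_zip_reader_get_num_files(&zip); ++i) {
mz_zip_archive_file_stat st;
if (mz_zip_reader_file_stat(&zip, i, &st))
printf("%s (%llu bytes)\n", st.m_filename,
(unsigned long long)st.m_uncomp_size);
}
mz_zip_reader_end(&zip);
}
#endif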
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
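// Example (editor's sketch, not part of miniz): a minimal end-to-end writer
// workflow using the functions above. The file name and payload are arbitrary,
// and error handling is reduced to early bail-outs.
#if 0
#include <string.h>
static mz_bool write_example_zip(void) {
  mz_zip_archive zip;
  const char *msg = "hello, zip";
  memset(&zip, 0, sizeof(zip));  // the mz_zip_archive must be zeroed before init
  if (!mz_zip_writer_init_file(&zip, "hello.zip", 0)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "greeting.txt", msg, strlen(msg),
                             MZ_DEFAULT_COMPRESSION)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  if (!mz_zip_writer_finalize_archive(&zip)) {  // mandatory for a valid archive
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif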
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
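// Example (editor's sketch, not part of miniz): round-tripping a blob through
// the two helpers above. "test.zip"/"blob.bin" are arbitrary names.
#if 0
#include <string.h>
static void roundtrip_example(void) {
  const char *data = "some bytes";
  size_t size = 0;
  void *p;
  mz_zip_add_mem_to_archive_file_in_place("test.zip", "blob.bin", data,
                                          strlen(data), NULL, 0,
                                          MZ_BEST_COMPRESSION);
  p = mz_zip_extract_archive_file_to_heap("test.zip", "blob.bin", &size, 0);
  if (p) mz_free(p);  // the caller owns the returned heap block
}
#endif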
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
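// Example (editor's sketch): one-shot decompression of a complete zlib stream
// held in memory. `zdata`/`zsize` stand for caller-supplied input.
#if 0
static void heap_inflate_example(const void *zdata, size_t zsize) {
  size_t out_len = 0;
  void *out = tinfl_decompress_mem_to_heap(zdata, zsize, &out_len,
                                           TINFL_FLAG_PARSE_ZLIB_HEADER);
  if (out) {
    // ... use out[0..out_len) ...
    mz_free(out);
  }
}
#endif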
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
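// Example (editor's sketch): streaming decompressed output straight to a FILE*
// through the callback interface above. `fout` is an assumed open file; the
// callback's non-zero return tells tinfl to keep going.
#if 0
#include <stdio.h>
static int put_to_file(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static int callback_inflate_example(const void *zdata, size_t zsize,
                                    FILE *fout) {
  size_t in_size = zsize;  // updated to the number of input bytes consumed
  return tinfl_decompress_mem_to_callback(zdata, &in_size, put_to_file, fout,
                                          TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif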
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per byte of input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
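// Example (editor's sketch): the canonical streaming pattern for
// tinfl_decompress() -- decode into a TINFL_LZ_DICT_SIZE ring buffer and flush
// it as it fills. `consume()` is an assumed application sink, and the whole
// input is assumed to be in memory (so TINFL_FLAG_HAS_MORE_INPUT is not set).
#if 0
static void inflate_stream_example(const mz_uint8 *next_in, size_t avail_in) {
  tinfl_decompressor inflator;
  mz_uint8 dict[TINFL_LZ_DICT_SIZE];
  size_t dict_ofs = 0;
  tinfl_init(&inflator);
  for (;;) {
    size_t in_bytes = avail_in;
    size_t out_bytes = TINFL_LZ_DICT_SIZE - dict_ofs;  // space left in the ring
    tinfl_status status =
        tinfl_decompress(&inflator, next_in, &in_bytes, dict, dict + dict_ofs,
                         &out_bytes, 0);
    next_in += in_bytes;
    avail_in -= in_bytes;
    consume(dict + dict_ofs, out_bytes);  // assumed sink for the output bytes
    dict_ofs = (dict_ofs + out_bytes) & (TINFL_LZ_DICT_SIZE - 1);
    if (status <= TINFL_STATUS_DONE) break;  // DONE (==0) or an error (<0)
  }
}
#endif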
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest, worst
// compression), 4095=Huffman+LZ (slowest, best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. More probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must mz_free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
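// Example (editor's sketch): one-shot compression to a heap block, requesting
// a zlib wrapper plus the default 128 dictionary probes. `src`/`src_len` stand
// for caller-supplied input.
#if 0
static void heap_deflate_example(const void *src, size_t src_len) {
  size_t out_len = 0;
  void *out = tdefl_compress_mem_to_heap(
      src, src_len, &out_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (out) mz_free(out);  // use out[0..out_len) first, then release it
}
#endif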
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL for a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
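// Example (editor's sketch): encoding a tiny 2x2 RGB image to an in-memory PNG
// file. The pixel values are arbitrary; the returned block must be mz_free()'d.
#if 0
static void png_example(void) {
  const mz_uint8 pixels[2 * 2 * 3] = {255, 0, 0,   0, 255, 0,
                                      0, 0, 255, 255, 255, 255};
  size_t png_size = 0;
  void *png = tdefl_write_image_to_png_file_in_memory(pixels, 2, 2, 3,
                                                      &png_size);
  if (png) {
    // ... write png[0..png_size) to disk ...
    mz_free(png);
  }
}
#endif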
// Output stream interface. The compressor uses this interface to write
// compressed data. It will typically be called with TDEFL_OUT_BUF_SIZE bytes
// at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
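// Example (editor's sketch): compressing straight to a FILE* through the
// output stream interface above. `fout` is an assumed open file.
#if 0
#include <stdio.h>
static mz_bool put_compressed(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static mz_bool output_deflate_example(const void *src, size_t src_len,
                                      FILE *fout) {
  return tdefl_compress_mem_to_output(
      src, src_len, put_compressed, fout,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
}
#endif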
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to this
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
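// Example (editor's sketch): the low-level streaming pattern with tdefl_init()
// + tdefl_compress(). Note a tdefl_compressor is roughly 300KB, so it is
// normally heap- or statically-allocated; the flags could equally come from
// tdefl_create_comp_flags_from_zip_params() below. All input is assumed to be
// in memory, so TDEFL_FINISH can be requested from the start.
#if 0
static tdefl_compressor g_comp;  // too large for most stacks
static tdefl_status deflate_stream_example(const void *next_in, size_t avail_in,
                                           void *next_out, size_t avail_out) {
  tdefl_status st = tdefl_init(
      &g_comp, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  while (st == TDEFL_STATUS_OKAY) {
    size_t in_bytes = avail_in, out_bytes = avail_out;
    st = tdefl_compress(&g_comp, next_in, &in_bytes, next_out, &out_bytes,
                        TDEFL_FINISH);
    next_in = (const mz_uint8 *)next_in + in_bytes;
    avail_in -= in_bytes;
    next_out = (mz_uint8 *)next_out + out_bytes;
    avail_out -= out_bytes;
    if (!avail_out) break;  // out of output space: drain before continuing
  }
  return st;  // TDEFL_STATUS_DONE on success
}
#endif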
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
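// Example (editor's sketch): deriving tdefl flags from zlib-style parameters;
// the three constants are miniz's own defaults.
#if 0
static mz_uint example_comp_flags(void) {
  return tdefl_create_comp_flags_from_zip_params(
      MZ_DEFAULT_LEVEL, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
}
#endif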
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
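// 5552 is zlib's NMAX: the largest n such that 255*n*(n+1)/2 + (n+1)*65520
// still fits in 32 bits, so s1/s2 only need reducing mod 65521 once per block.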
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
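// Example (editor's sketch): both checksum helpers are incremental -- seed
// with the *_INIT constant and feed the data in chunks, in order.
#if 0
static mz_ulong crc_of_two_chunks(const mz_uint8 *p1, size_t n1,
                                  const mz_uint8 *p2, size_t n2) {
  mz_ulong crc = MZ_CRC32_INIT;
  crc = mz_crc32(crc, p1, n1);
  crc = mz_crc32(crc, p2, n2);  // same pattern works for mz_adler32()
  return crc;
}
#endif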
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (It's actually pretty tricky to compute
// a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
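// Example (editor's sketch): the zlib-style one-shot pair, sized with
// mz_compressBound(). `src`/`src_len` stand for caller-supplied input.
#if 0
#include <stdlib.h>
static int compress_example(const unsigned char *src, mz_ulong src_len) {
  mz_ulong cmp_len = mz_compressBound(src_len);
  unsigned char *cmp = (unsigned char *)malloc(cmp_len);
  int status = cmp ? mz_compress(cmp, &cmp_len, src, src_len) : MZ_MEM_ERROR;
  // on MZ_OK, cmp[0..cmp_len) holds the zlib stream (mz_uncompress reverses it)
  free(cmp);
  return status;
}
#endif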
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
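// Example (editor's sketch): streaming inflation with the zlib-style API --
// the whole compressed buffer is given up front and the output is drained one
// fixed-size chunk at a time. `drain()` is an assumed application sink.
#if 0
static int inflate_example(const unsigned char *cmp_data, size_t cmp_len) {
  mz_stream strm;
  unsigned char out_buf[32768];
  int status;
  memset(&strm, 0, sizeof(strm));
  status = mz_inflateInit(&strm);
  if (status != MZ_OK) return status;
  strm.next_in = cmp_data;
  strm.avail_in = (mz_uint32)cmp_len;
  do {
    strm.next_out = out_buf;
    strm.avail_out = sizeof(out_buf);
    status = mz_inflate(&strm, MZ_SYNC_FLUSH);
    drain(out_buf, sizeof(out_buf) - strm.avail_out);  // assumed output sink
  } while (status == MZ_OK);
  mz_inflateEnd(&strm);
  return status;  // MZ_STREAM_END on success
}
#endif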
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent of all
// compression APIs)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream to decode the next Huffman
// code (and absolutely no more). It works by trying to fully decode a Huffman
// code using whatever bits are currently present in the bit buffer. If this
// fails, it reads another byte and tries again, until it succeeds or until
// the bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read beyond the final byte of the deflate stream. (In other words,
// when this macro wants to read another byte from the input, it REALLY needs
// another byte in order to fully decode the next Huffman code.) Handling this
// properly is particularly important on raw deflate (non-zlib) streams, which
// aren't followed by a byte-aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next,
*const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next,
*const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags &
~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent of all decompression
// APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
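// Illustrative note (not part of the original miniz sources): the sort above
// is a two-pass, LSB-first counting sort over the 16-bit m_key. One loop
// builds both byte histograms; counting sort is stable, so bucketing by the
// low byte and then by the high byte yields a full ordering. The
// "total_passes" check skips the second pass when every key's high byte is
// zero (hist[256] == num_syms), and the return value is whichever of
// pSyms0/pSyms1 holds the final pass's output. Example: keys
// {0x0102, 0x0001, 0x0101} become {0x0001, 0x0101, 0x0102} after pass 0
// (low bytes 01, 01, 02) and stay in that order after pass 1.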
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
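// Worked example (illustrative): tdefl_calculate_minimum_redundancy() runs
// the in-place Moffat/Katajainen algorithm on frequencies that are already
// sorted ascending (guaranteed by the radix sort above) and overwrites each
// m_key with its Huffman code length. For input keys {1, 1, 2, 4} the
// output keys are {3, 3, 2, 1}: the Huffman tree merges (1,1)->2, then
// (2,2)->4, then (4,4)->8, putting the two rarest symbols at depth 3.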
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
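// Illustrative note: a length-limited prefix code is valid iff the Kraft
// sum, total = sum_i pNum_codes[i] << (max_code_size - i), equals
// 1 << max_code_size. Folding oversized codes into pNum_codes[max_code_size]
// can overshoot that bound, so each repair step shortens one max-length
// code and splits the deepest available shorter code into two children.
// Example with max_code_size = 3 and input lengths {1, 2, 3, 4, 4}:
// clamping gives counts [1, 1, 3] for lengths 1..3 (total 4+2+3 = 9 > 8);
// one step yields [1, 0, 4] (total 4+0+4 = 8), i.e. Kraft sum 1/2+4/8 = 1.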
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
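// Illustrative note: the tail of this function assigns canonical codes per
// RFC 1951 3.2.2 - codes of equal length are consecutive integers and
// next_code[len] = (next_code[len-1] + num_codes[len-1]) << 1. For lengths
// {1, 2, 3, 3} that gives 0b0, 0b10, 0b110, 0b111. The final loop then
// bit-reverses each code because DEFLATE transmits Huffman codes
// most-significant-bit first while TDEFL_PUT_BITS emits bits LSB-first.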
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
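// Illustrative trace of TDEFL_PUT_BITS, starting from an empty bit buffer:
// TDEFL_PUT_BITS(0b101, 3) then TDEFL_PUT_BITS(0b01, 2) leaves
// m_bit_buffer = 0b01101 with m_bits_in = 5; a further
// TDEFL_PUT_BITS(0b111, 3) reaches 8 bits and flushes the byte 0b11101101
// to the output buffer, leaving the bit buffer empty again.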
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
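// Illustrative note: the sizes installed above are the fixed Huffman codes
// of RFC 1951 3.2.6 - literals 0-143 use 8 bits, 144-255 use 9, length
// symbols 256-279 use 7, 280-287 use 8, and all 32 distance symbols use
// 5 bits - and TDEFL_PUT_BITS(1, 2) emits BTYPE=01 ("fixed Huffman codes").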
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n,
use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d,
(d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
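// Illustrative note on the raw-block path above: when compression would
// expand the data, the output position is rewound and a stored block is
// emitted instead - BTYPE=00, padding to a byte boundary, then LEN and
// NLEN (= LEN ^ 0xFFFF) as little-endian 16-bit words followed by the raw
// dictionary bytes, per RFC 1951 3.2.4. The loop that toggles
// d->m_total_lz_bytes with ^= 0xFFFF writes exactly that LEN/NLEN pair.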
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
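// Illustrative note: both tdefl_find_match() variants walk the same
// structure. d->m_hash maps a hash of the upcoming bytes to the most
// recent dictionary position with that hash, and d->m_next chains each
// position back to the previous occurrence. TDEFL_PROBE follows one link,
// giving up when the chain ends or the distance exceeds max_dist, and the
// cheap check against the bytes at match_len-1 rejects most candidates
// before the full comparison loop runs.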
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
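// Illustrative note on the LZ code buffer the two recorders fill: each
// group of up to 8 codes is preceded by one flag byte, populated from the
// top bit down (set = match, clear = literal). A literal occupies one
// byte; a match occupies three - (match_len - TDEFL_MIN_MATCH_LEN), then
// (match_dist - 1) as a little-endian 16-bit value - so
// tdefl_compress_lz_codes() can replay the block once the Huffman tables
// have been built from the accumulated m_huff_count[] statistics.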
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
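// Illustrative note on the lazy parse above: outside greedy mode a found
// match is not emitted immediately - its first byte is stashed in
// m_saved_lit and the match in m_saved_match_len/m_saved_match_dist. If
// the match starting one byte later is longer, the stashed byte goes out
// as a literal and the search continues; otherwise the stashed match is
// emitted. Matches of 128+ bytes are always taken at once. This is the
// same one-step lazy matching zlib uses at its higher compression levels.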
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
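// Usage sketch (illustrative only, not part of the library; assumes
// <stdio.h> and a writable path):
//
// static mz_bool write_cb(const void *pBuf, int len, void *pUser) {
//   return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
// }
// ...
// FILE *pOut = fopen("out.deflate", "wb");
// mz_bool ok = pOut && tdefl_compress_mem_to_output(
//     pSrc, src_len, write_cb, pOut, TDEFL_DEFAULT_MAX_PROBES);
// if (pOut) fclose(pOut);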
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
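// Usage sketch (illustrative only): one-call compression to a heap buffer.
//
// size_t comp_len = 0;
// void *pComp = tdefl_compress_mem_to_heap(
//     pSrc, src_len, &comp_len,
//     TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER);
// if (pComp) {
//   // comp_len bytes of zlib-framed deflate data are now in pComp.
//   MZ_FREE(pComp);
// }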
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
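// Illustrative mapping: zlib-style (level = 9, window_bits = 15,
// strategy = MZ_DEFAULT_STRATEGY) yields s_tdefl_num_probes[9] (768 probes)
// OR'd with TDEFL_WRITE_ZLIB_HEADER; level 0 forces raw stored blocks via
// TDEFL_FORCE_ALL_RAW_BLOCKS; and a non-positive window_bits (the usual
// zlib convention for a raw deflate stream) leaves the header flag clear.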
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
// were #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
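// Usage sketch (illustrative only; assumes <stdio.h> and a tightly packed
// RGBA image in memory):
//
// size_t png_len = 0;
// void *pPNG = tdefl_write_image_to_png_file_in_memory(
//     pRGBA, width, height, 4, &png_len);
// if (pPNG) {
//   FILE *pOut = fopen("image.png", "wb");
//   if (pOut) { fwrite(pPNG, 1, png_len, pOut); fclose(pOut); }
//   MZ_FREE(pPNG);
// }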
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#error "No arvhive APIs"
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
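// Illustrative note on the MS-DOS packing decoded above: dos_date holds
// bits 15-9 = year - 1980, bits 8-5 = month (1-12), bits 4-0 = day (1-31);
// dos_time holds bits 15-11 = hour, bits 10-5 = minute, bits 4-0 =
// second / 2 (hence the "<< 1" when reconstructing tm_sec). Example:
// 13:02:04 on 2015-10-09 packs as dos_time = (13 << 11) | (2 << 5) | 2 and
// dos_date = (35 << 9) | (10 << 5) | 9.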
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity check - reject archives which are too small to contain even
  // an end of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
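// Usage sketch (illustrative only, not part of miniz; kept compiled out):
// initializing a reader with a caller-supplied m_pRead callback, mirroring
// the pattern mz_zip_reader_init_mem() uses internally. example_blob_t and
// the example_* names are hypothetical.
#if 0
typedef struct {
  const mz_uint8 *m_p;
  size_t m_size;
} example_blob_t;
static size_t example_blob_read(void *pOpaque, mz_uint64 file_ofs, void *pBuf,
                                size_t n) {
  example_blob_t *pBlob = (example_blob_t *)pOpaque;
  size_t s = (file_ofs >= pBlob->m_size)
                 ? 0
                 : (size_t)MZ_MIN(pBlob->m_size - file_ofs, n);
  memcpy(pBuf, pBlob->m_p + file_ofs, s);
  return s;
}
static mz_bool example_open_via_callback(example_blob_t *pBlob) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));  // zeroed struct selects the default allocators
  zip.m_pRead = example_blob_read;
  zip.m_pIO_opaque = pBlob;
  if (!mz_zip_reader_init(&zip, pBlob->m_size, 0)) return MZ_FALSE;
  // ... inspect or extract entries here ...
  return mz_zip_reader_end(&zip);
}
#endif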
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
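// Usage sketch (illustrative only; kept compiled out): enumerating the
// contents of an archive held in memory. pData/data_size are hypothetical
// caller inputs; printf() is just for illustration and assumes stdio.
#if 0
static void example_list_archive(const void *pData, size_t data_size) {
  mz_zip_archive zip;
  mz_uint i;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_mem(&zip, pData, data_size, 0)) return;
  for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
    mz_zip_archive_file_stat st;
    if (mz_zip_reader_file_stat(&zip, i, &st))
      printf("%s: %u -> %u bytes\n", st.m_filename, (mz_uint)st.m_comp_size,
             (mz_uint)st.m_uncomp_size);
  }
  mz_zip_reader_end(&zip);
}
#endif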
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
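// Usage sketch (illustrative only; kept compiled out): printing every entry
// name in an on-disk archive. A fixed-size buffer is used for brevity;
// mz_zip_reader_get_filename() truncates and NUL-terminates as needed.
#if 0
static void example_list_file(const char *pZip_path) {
  mz_zip_archive zip;
  mz_uint i;
  char name[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, pZip_path, 0)) return;
  for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
    if (mz_zip_reader_get_filename(&zip, i, name, sizeof(name)))
      printf("%s%s\n", name,
             mz_zip_reader_is_file_a_directory(&zip, i) ? " (dir)" : "");
  }
  mz_zip_reader_end(&zip);
}
#endif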
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
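// Usage sketch (illustrative only; kept compiled out): locating an entry
// case-insensitively while ignoring directory components, then falling back
// to an exact-case match. Note that passing either flag (or a comment)
// disables the sorted binary search and forces the linear scan above.
#if 0
static int example_find(mz_zip_archive *pZip, const char *pName) {
  int idx =
      mz_zip_reader_locate_file(pZip, pName, NULL, MZ_ZIP_FLAG_IGNORE_PATH);
  if (idx < 0)
    idx =
        mz_zip_reader_locate_file(pZip, pName, NULL, MZ_ZIP_FLAG_CASE_SENSITIVE);
  return idx;  // -1 if not present
}
#endif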
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports the stored and deflate compression methods.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
    // Use a user-provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
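    // The "(0, expr)" comma sequence below quiets MSVC's "conditional
    // expression is constant" warning; the test guards against size_t being
    // 32 bits while the requested size exceeds 0x7FFFFFFF.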
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
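// Usage sketch (illustrative only; kept compiled out): pulling a single entry
// into a heap block. With the default allocators the returned pointer can be
// released with mz_free(); custom m_pAlloc/m_pFree hooks must be paired with
// the matching free - an assumption to verify in your configuration.
#if 0
static void example_dump_entry(mz_zip_archive *pZip, mz_uint file_index) {
  size_t size = 0;
  void *p = mz_zip_reader_extract_to_heap(pZip, file_index, &size, 0);
  if (p) {
    // ... use "size" bytes at p ...
    mz_free(p);
  }
}
#endif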
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
    return NULL;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports the stored and deflate compression methods.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
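// Usage sketch (illustrative only; kept compiled out): streaming
// decompression through a write callback, here just totalling the output
// bytes instead of storing them. The example_* names are hypothetical.
#if 0
static size_t example_count_cb(void *pOpaque, mz_uint64 ofs, const void *pBuf,
                               size_t n) {
  (void)ofs;
  (void)pBuf;
  *(mz_uint64 *)pOpaque += n;
  return n;  // claim all bytes were consumed so extraction continues
}
static mz_bool example_measure(mz_zip_archive *pZip, mz_uint file_index,
                               mz_uint64 *pTotal) {
  *pTotal = 0;
  return mz_zip_reader_extract_to_callback(pZip, file_index, example_count_cb,
                                           pTotal, 0);
}
#endif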
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
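// Usage sketch (illustrative only; kept compiled out): extracting an entry by
// name straight to disk. mz_zip_reader_extract_file_to_file() further below
// wraps these same two steps.
#if 0
static mz_bool example_unpack_one(mz_zip_archive *pZip, const char *pName,
                                  const char *pDst_path) {
  int idx = mz_zip_reader_locate_file(pZip, pName, NULL, 0);
  return (idx >= 0) && mz_zip_reader_extract_to_file(pZip, idx, pDst_path, 0);
}
#endif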
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
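// Usage sketch (illustrative only; kept compiled out): building a complete
// archive in memory. After mz_zip_writer_finalize_heap_archive() the caller
// owns the buffer and (with the default allocators) releases it with
// mz_free(). Note mz_zip_writer_end() is still required after finalizing.
#if 0
static mz_bool example_build_in_heap(void **ppZip_data, size_t *pZip_size) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "hello.txt", "Hello, world!", 13,
                             MZ_DEFAULT_LEVEL)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  if (!mz_zip_writer_finalize_heap_archive(&zip, ppZip_data, pZip_size)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif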
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // maximum size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;  // silence the unused-parameter warning when stdio is disabled
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it was heap-allocated and can
    // be resized via the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
  // Archive is being read via a user-provided read function - make sure the
  // user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
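// Usage sketch (illustrative only; kept compiled out, and it requires stdio
// support): appending an entry to an existing archive in place - open for
// reading, flip the same mz_zip_archive to writing, add, and refinalize.
// pPath and the entry contents are hypothetical.
#if 0
static mz_bool example_append(const char *pPath) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, pPath, 0)) return MZ_FALSE;
  if (!mz_zip_writer_init_from_reader(&zip, pPath)) {
    mz_zip_reader_end(&zip);  // no-op if init_from_reader already ended it
    return MZ_FALSE;
  }
  if (!mz_zip_writer_add_mem(&zip, "new_entry.txt", "appended", 8,
                             MZ_DEFAULT_LEVEL) ||
      !mz_zip_writer_finalize_archive(&zip)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif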
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
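  // Example: alignment 16, current archive size 10 -> n = 10, padding = 6;
  // if the size is already aligned, n = 0 and the final mask yields 0.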
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
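// Usage sketch (illustrative only; kept compiled out): adding an on-disk file
// under a different archive name, with a comment attached. The paths and
// comment text here are hypothetical.
#if 0
static mz_bool example_add_disk_file(mz_zip_archive *pZip) {
  const char comment[] = "imported from disk";
  return mz_zip_writer_add_file(pZip, "docs/readme.txt", "README.txt", comment,
                                (mz_uint16)(sizeof(comment) - 1),
                                MZ_DEFAULT_LEVEL);
}
#endif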
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
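// Finalizes an in-memory (heap-backed) archive and hands the underlying
// buffer to the caller via *pBuf/*pSize. The zip state drops its reference
// (m_pMem is cleared), so the caller owns the buffer afterwards and is
// responsible for freeing it with the archive's allocator.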
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static const int kEXRVersionSize = 8;
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00 << 13;  // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
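// Example for half_to_float: h.u == 0x3C00 (half 1.0) gives
// o.u == ((0x3C00 & 0x7fff) << 13) + ((127 - 15) << 23) == 0x3F800000,
// which is 1.0f; the sign bit (here 0) is OR'ed in last.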
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
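// Example for float_to_half_full: f == 1.0f has Exponent == 127, so
// newexp == 127 - 127 + 15 == 15 and Mantissa >> 13 == 0, giving
// o.u == 15 << 10 == 0x3C00 (half 1.0); no rounding occurs since bit
// 0x1000 of the mantissa is clear.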
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until the NUL ('\0') terminator.
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
return false;
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
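// An EXR header attribute is serialized as:
//   name (NUL-terminated) | type (NUL-terminated) |
//   data length (little-endian uint32) | data (length bytes)
// ReadAttribute above parses this layout; WriteAttributeToMemory below emits it.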
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
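// On disk, each entry of the EXR channel list is laid out as:
//   name (NUL-terminated) | pixel_type (int32) | p_linear (uint8) |
//   reserved (3 bytes) | x_sampling (int32) | y_sampling (int32)
// and the whole list is terminated by a single '\0'. ReadChannelInfo and
// WriteChannelInfo below parse and emit this layout.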
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
    p = ReadString(&info.name, p, size_t(data_len));
    if (p == NULL) {
      // Buffer overrun (no NUL terminator found). Issue #51.
      return false;
    }
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing before deflate (adapted from OpenEXR's
  // ImfZipCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
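  // Example: bytes {10, 20, 30, 40, 50} are reordered to {10, 30, 50, 20, 40}:
  // even-indexed bytes fill the first half, odd-indexed bytes the second.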
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
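  // Each byte (except the first) now holds the delta to its predecessor,
  // biased by 128 modulo 256: continuing the example above,
  // {10, 30, 50, 20, 40} becomes {10, 148, 148, 98, 148}.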
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply EXR-specific postprocessing after inflate (adapted from OpenEXR's
  // ImfZipCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
      // Store (run length - 1); subtracting before the cast keeps the value
      // in signed-char range even for maximal runs.
      *outWrite++ = static_cast<signed char>(runEnd - runStart - 1);
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Incompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
      *outWrite++ = static_cast<signed char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
if (0 > (maxLength -= count)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
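//
// Example: "AAAABC" compresses to {3, 'A', -2, 'B', 'C'}: a non-negative
// count n means "repeat the next byte n + 1 times", and a negative count -n
// means "copy the next n bytes verbatim".
//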
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing before RLE (adapted from OpenEXR's
  // ImfRleCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static void DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
assert(ret == static_cast<int>(uncompressed_size));
(void)ret;
//
  // Apply EXR-specific postprocessing after RLE decoding (adapted from
  // OpenEXR's ImfRleCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
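//
// Example: wenc14(300, 100, l, h) yields l = 200 (the average) and
// h = 200 (the difference); wdec14(200, 200, a, b) restores a = 300, b = 100.
//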
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
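//
// Example: wenc16(0xFFFF, 1, l, h) yields l = 0x4000, h = 0x7FFE, and
// wdec16(0x4000, 0x7FFE, a, b) restores a = 0xFFFF, b = 1; the offsets and
// MOD_MASK keep every intermediate value inside 16 bits.
//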
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec {  //  short code     long code
  //--------------------------------------------
  int len : 8;   //  code length    0
  int lit : 24;  //  lit            p size
  int *p;        //  0              lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
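//
// The pair (c, lc) forms a bit buffer: c accumulates bits, lc counts how many
// are pending. Example: outputBits(6, 0b000101, ...) followed by
// outputBits(6, 0b000011, ...) flushes one byte 0b00010100 (the first code
// plus the top two bits of the second) and leaves the low 4 bits pending.
//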
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
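//
// Example: with code lengths {symbol a: 1 bit, symbols b, c: 2 bits each},
// the loop above computes n[2] = 0 and n[1] = 1, so the assigned codes are
// a -> 1, b -> 00, c -> 01. Note the shorter code, zero-filled to the right
// ("10"), is numerically above both longer codes, as the rules require.
//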
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
int hlink[HUF_ENCSIZE];
long long *fHeap[HUF_ENCSIZE];
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
long long scode[HUF_ENCSIZE];
memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode);
memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
  if (lc > 0) *p++ = static_cast<char>(c << (8 - lc));
*pcode = p;
}
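//
// Example: a run of 3 zero lengths is packed as the single 6-bit value 60;
// a run of 8 is packed as 63 followed by 8 - 6 == 2 in the next 8 bits.
//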
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode > ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of (runCount + 1) instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
 const int ni, // i : input buffer size (number of values)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#define getCode(po, rlc, c, lc, in, out, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
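//
// When the decoded symbol equals the run-length code rlc, the next 8 bits
// give a repeat count and the previously emitted value out[-1] is replicated
// that many times; otherwise the symbol itself is emitted.
//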
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out;
unsigned short *oe = out + no;
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
getCode(pl.lit, rlc, c, lc, in, out, oe);
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
getCode(pl.p[j], rlc, c, lc, in, out, oe);
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
getCode(pl.lit, rlc, c, lc, in, out, oe);
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(long long freq[HUF_ENCSIZE],
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
long long freq[HUF_ENCSIZE];
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq, &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq, im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
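//
// Compressed layout produced above: a 20-byte header (im, iM, tableLength,
// nBits, 4 reserved bytes, each a little-endian uint32), then the packed
// code-length table, then the Huffman bitstream; the return value is the
// total size in bytes.
//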
static bool hufUncompress(const char compressed[], int nCompressed,
unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // Nothing to decode: succeed only if no output is expected.
    return (nRaw == 0);
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be runnable on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }
    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
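// Example: if the bitmap marks only the values 7 and 9 (0 is always implied),
// the forward LUT maps 0->0, 7->1, 9->2 (everything else to 0) and the
// reverse LUT maps 0->0, 1->7, 2->9; both functions return 2, the largest
// remapped index.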
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
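// PIZ pipeline (CompressPiz below): build a bitmap of the 16-bit values that
// actually occur, remap them to a dense range with the forward LUT, run the
// 2D wavelet transform per channel, then Huffman-encode the result. The
// output is minNonZero/maxNonZero (2 bytes each), the used slice of the
// bitmap, a 4-byte Huffman data length, and the Huffman data itself.
// DecompressPiz inverts these steps using the reverse LUT.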
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap,
minNonZero, maxNonZero);
unsigned short lut[USHORT_RANGE];
unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap, 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
unsigned short lut[USHORT_RANGE];
memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);
//
// Huffman decoding
//
int length;
length = *(reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer.at(0), static_cast<int>(tmpBufSize))) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
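//
// ZFP supports three modes, selected via the `zfpCompressionType` custom
// attribute: fixed RATE (bits per value), fixed PRECISION (bit planes) and
// fixed ACCURACY (absolute error tolerance). The matching parameter is read
// from `zfpCompressionRate`, `zfpCompressionPrecision` or
// `zfpCompressionTolerance` in FindZFPCompressionParam below; the defaults
// here only apply when those attributes are absent.
//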
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam &param) {
size_t uncompressed_size = static_cast<size_t>(dst_width) *
static_cast<size_t>(dst_num_lines) *
static_cast<size_t>(num_channels) * sizeof(float);
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam &param) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
assert(ret);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
// Use `width` for the decoded buffer layout, matching the HALF/UINT paths.
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
&dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len));
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
static_cast<int>(num_attributes))) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, static_cast<int>(num_channels), data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param)) {
return false;
}
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr +
c * static_cast<size_t>(width) * sizeof(unsigned short));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(float));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int));
unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
return true;
}
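//
// Tiles on the right/bottom edges may be narrower/shorter than
// tile_size_x x tile_size_y, so DecodeTiledPixelData clamps the decoded
// size. Illustrative numbers (hypothetical): with data_width = 100 and
// tile_size_x = 32, tiles 0..2 are 32 pixels wide and the last tile
// (tile_offset_x = 3) is 100 - 3 * 32 = 4 pixels wide.
//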
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
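//
// Within a decompressed scanline the channel data is planar and ordered by
// channel, so each channel's per-pixel byte offset is the running sum of the
// preceding channel sizes. Illustrative layout (hypothetical channel list):
// for channels {A: HALF, B: FLOAT, G: HALF}, ComputeChannelLayout yields
// channel_offset_list = {0, 2, 6} and pixel_data_size = 8 bytes per pixel.
//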
static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
assert(0);
}
}
}
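//
// AllocateImage returns a planar image: one malloc'ed buffer per channel,
// each holding data_width * data_height samples of the *requested* pixel
// type (HALF channels may be promoted to FLOAT). The buffers are released
// later through FreeEXRImage().
//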
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) = "Failed to parse channel info.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) = "# of channels is zero.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute (up to TINYEXR_MAX_ATTRIBUTES).
if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES);
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy the pointer.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
exr_header->header_len = info.header_len;
}
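//
// Chunk layout consumed by DecodeChunk (see the parsing code below):
//   scanline chunk: [int line_no][int data_len][data_len bytes of pixels]
//   tile chunk    : [4 x int tile coordinates][int data_len][data]
// One scanline chunk covers num_scanline_blocks lines: 1 for NONE, RLE and
// ZIPS, 16 for ZIP and ZFP, 32 for PIZ.
//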
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels);
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size - (offsets[tile_idx] + sizeof(int) * 5);
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size - (offsets[y_idx] + sizeof(int) * 2);
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else {
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
//assert(num_lines > 0);
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
line_no -= exr_header->data_window[1];
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, data_width, y,
line_no, num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
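//
// When the offset table in the file is zeroed out or truncated, the offsets
// can be rebuilt by walking the chunks themselves: every scanline chunk
// starts with an 8-byte header (y, data_len), so the next chunk begins at
// current_offset + 8 + data_len. This mirrors
// OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets().
//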
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
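//
// Illustrative offset-table sizing (hypothetical image): a 256x270 ZIP
// image has num_scanline_blocks = 16, so num_blocks = ceil(270 / 16) = 17
// and the last block holds only 270 - 16 * 16 = 14 lines.
//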
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
if (err) {
(*err) = "Invalid data window value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
if (err) {
(*err) = "Invalid data height value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if ((data_width < 0) || (data_height < 0)) {
if (err) {
(*err) = "Invalid data window value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If the line offsets are invalid, try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
if (err) {
(*err) = "Cannot reconstruct lineOffset table.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
return DecodeChunk(exr_image, exr_header, offsets, head, size);
}
} // namespace tinyexr
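//
// A minimal usage sketch for LoadEXR; the file name is illustrative. The
// returned RGBA buffer is malloc'ed and owned by the caller:
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret == TINYEXR_SUCCESS) {
//     // pixel (x, y), channel c: rgba[4 * (y * width + x) + c]
//     free(rgba);
//   }
//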
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
if (err) {
(*err) = "Loading multipart or DeepImage is not supported yet.\n";
}
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
// Alpha channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
} else {
// Assume RGB(A)
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // May leak
#else
(*err) = strdup(err_str.c_str()); // May leak
#endif
}
}
ConvertHeader(exr_header, info);
// Transfer `tiled` from version.
exr_header->tiled = version->tiled;
return ret;
}
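//
// Same contract as LoadEXR, but decoding from a caller-supplied buffer that
// holds the whole .exr file. Sketch (buffer acquisition is up to the caller;
// ReadWholeFile is a hypothetical helper):
//
//   std::vector<unsigned char> data = ReadWholeFile("input.exr");
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   int ret = LoadEXRFromMemory(&rgba, &w, &h, &data.at(0), data.size(), &err);
//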
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
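//
// SaveEXRImageToMemory serializes a scanline EXR into a heap buffer and
// returns the byte count (0 on error). Rough sketch, assuming `image` and
// `header` were filled in by the caller beforehand:
//
//   unsigned char *mem = NULL;
//   const char *err = NULL;
//   size_t n = SaveEXRImageToMemory(&image, &header, &mem, &err);
//   if (n > 0) {
//     // write mem[0..n-1] somewhere, then free(mem)
//   }
//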
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return 0; // @fixme
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
if (err) {
(*err) = "Pixel type must be FLOAT for ZFP compression.";
}
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<unsigned char> data;
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use a signed int loop counter, since some OpenMP compilers don't allow an
// unsigned type for `parallel for`.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = f32.f;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = h16.u;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
1024 + static_cast<unsigned int>(
1.2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
data.insert(data.end(), data_list[i].begin(), data_list[i].end());
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
{ memory.insert(memory.end(), data.begin(), data.end()); }
assert(memory.size() > 0);
(*memory_out) = static_cast<unsigned char *>(malloc(memory.size()));
memcpy((*memory_out), &memory.at(0), memory.size());
return memory.size(); // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || exr_header == NULL || filename == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot write a file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if ((mem_size > 0) && mem) {
fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if (errcode || (!fp)) { // fopen_s returns 0 on success.
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
if (err) {
(*err) = "File size is zero.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Invalid magic number.";
}
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 8, 0, 0] (deep bit set in the second byte)
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "Unsupported compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
if (err) {
(*err) = "Failed to parse channel info.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
if (err) {
(*err) = "Unsupported format.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
// Per-scanline sample buffers (image[c][y]) are allocated later, when
// each block is decoded.
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen,
data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory. Pixel data is stored as image[channels][pixel_samples].
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui = *reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
f16.u = *reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f = *reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // may leak
#else
(*err) = strdup(err_str.c_str()); // may leak
#endif
}
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
if (err) {
(*err) = "`chunkCount' attribute is not found in the header.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
// transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, each chunk is preceded by a 'part number'.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// The EXR spec says 'part number' is 'unsigned long', but it is actually an
// 'unsigned int' (4 bytes) in the OpenEXR implementation.
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset size.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First check that each chunk's 'part number' is identical to 'i'.
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
assert(0);
return TINYEXR_ERROR_INVALID_DATA;
}
}
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// Assume at least 16x16 pixels.
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
EXRHeader header;
InitEXRHeader(&header);
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most of EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
const char *err = NULL;
int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
// Free the header allocations on both the success and the error path.
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
initialise_chunk_kernel_c.c | /*Crown Copyright 2012 AWE.
*
* This file is part of CloverLeaf.
*
* CloverLeaf is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* CloverLeaf is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* CloverLeaf. If not, see http://www.gnu.org/licenses/. */
/**
* @brief Driver for chunk initialisation.
* @author Wayne Gaudin
* @details Invokes the user specified chunk initialisation kernel.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ftocmacros.h"
#include <math.h>
void initialise_chunk_kernel_c_(int *xmin,int *xmax,int *ymin,int *ymax,
double *minx,
double *miny,
double *dx,
double *dy,
double *vertexx,
double *vertexdx,
double *vertexy,
double *vertexdy,
double *cellx,
double *celldx,
double *celly,
double *celldy,
double *volume,
double *xarea,
double *yarea)
{
int x_min=*xmin;
int x_max=*xmax;
int y_min=*ymin;
int y_max=*ymax;
double min_x=*minx;
double min_y=*miny;
double d_x=*dx;
double d_y=*dy;
int j,k;
#pragma omp parallel
{
#pragma omp for private(j)
#pragma ivdep
for (j=x_min-2;j<=x_max+3;j++) {
vertexx[FTNREF1D(j,x_min-2)]=min_x+d_x*(double)(j-x_min);
}
#pragma omp for private(j)
#pragma ivdep
for (j=x_min-2;j<=x_max+3;j++) {
vertexdx[FTNREF1D(j,x_min-2)]=d_x;
}
#pragma omp for private(k)
#pragma ivdep
for (k=y_min-2;k<=y_max+3;k++) {
vertexy[FTNREF1D(k,y_min-2)]=min_y+d_y*(double)(k-y_min);
}
#pragma omp for private(k)
#pragma ivdep
for (k=y_min-2;k<=y_max+3;k++) {
vertexdy[FTNREF1D(k,y_min-2)]=d_y;
}
#pragma omp for private(j)
#pragma ivdep
for (j=x_min-2;j<=x_max+2;j++) {
cellx[FTNREF1D(j,x_min-2)]=0.5*(vertexx[FTNREF1D(j,x_min-2)]+vertexx[FTNREF1D(j+1,x_min-2)]);
}
#pragma omp for private(j)
#pragma ivdep
for (j=x_min-2;j<=x_max+2;j++) {
celldx[FTNREF1D(j,x_min-2)]=d_x;
}
#pragma omp for private(k)
#pragma ivdep
for (k=y_min-2;k<=y_max+2;k++) {
celly[FTNREF1D(k,y_min-2)]=0.5*(vertexy[FTNREF1D(k,y_min-2)]+vertexy[FTNREF1D(k+1,y_min-2)]);
}
#pragma omp for private(k)
#pragma ivdep
for (k=y_min-2;k<=y_max+2;k++) {
celldy[FTNREF1D(k,y_min-2)]=d_y;
}
#pragma omp for private(j,k)
for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
for (j=x_min-2;j<=x_max+2;j++) {
volume[FTNREF2D(j,k,x_max+4,x_min-2,y_min-2)]=d_x*d_y;
}
}
#pragma omp for private(j,k)
for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
for (j=x_min-2;j<=x_max+2;j++) {
xarea[FTNREF2D(j,k,x_max+5,x_min-2,y_min-2)]=celldy[FTNREF1D(k,y_min-2)];
}
}
#pragma omp for private(j,k)
for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
for (j=x_min-2;j<=x_max+2;j++) {
yarea[FTNREF2D(j,k,x_max+4,x_min-2,y_min-2)]=celldx[FTNREF1D(j,x_min-2)];
}
}
}
}
|
test_core.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (c) 2012 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or modify it under the
* terms of the version 2.1 (or later) of the GNU Lesser General Public License
* as published by the Free Software Foundation; or version 2.0 of the Apache
* License as published by the Apache Software Foundation. See the LICENSE files
* for more details.
*
* RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the LICENSE files for more details.
*
* You should have received a copy of the GNU Lesser General Public or the
* Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
* or <https://www.apache.org/licenses/>.
*/
/**
* @file
*
* Tests for configuration management.
*
* @ingroup test
*/
#include <stdio.h>
#include "relic.h"
#include "relic_test.h"
#if defined(MULTI)
#if MULTI == PTHREAD
void *master(void *ptr) {
int *code = (int *)ptr;
core_init();
RLC_THROW(ERR_NO_MEMORY);
if (err_get_code() != RLC_ERR) {
*code = RLC_ERR;
} else {
*code = RLC_OK;
}
core_clean();
return NULL;
}
void *tester(void *ptr) {
int *code = (int *)ptr;
core_init();
if (err_get_code() != RLC_OK) {
*code = RLC_ERR;
} else {
*code = RLC_OK;
}
core_clean();
return NULL;
}
#endif
#endif
int main(void) {
int code = RLC_ERR;
/* Initialize library with default configuration. */
if (core_init() != RLC_OK) {
core_clean();
return 1;
}
util_banner("Tests for the CORE module:\n", 0);
TEST_ONCE("the library context is consistent") {
TEST_ASSERT(core_get() != NULL, end);
} TEST_END;
TEST_ONCE("switching the library context is correct") {
ctx_t new_ctx, *old_ctx;
/* Backup the old context. */
old_ctx = core_get();
/* Switch the library context. */
core_set(&new_ctx);
/* Reinitialize library with new context. */
core_init();
/* Run function to manipulate the library context. */
RLC_THROW(ERR_NO_MEMORY);
core_set(old_ctx);
TEST_ASSERT(err_get_code() == RLC_OK, end);
core_set(&new_ctx);
TEST_ASSERT(err_get_code() == RLC_ERR, end);
/* Now we need to finalize the new context. */
core_clean();
/* And restore the original context. */
core_set(old_ctx);
} TEST_END;
code = RLC_OK;
#if defined(MULTI)
#if MULTI == OPENMP
TEST_ONCE("library context is thread-safe") {
omp_set_num_threads(CORES);
#pragma omp parallel shared(code)
{
if (omp_get_thread_num() == 0) {
RLC_THROW(ERR_NO_MEMORY);
if (err_get_code() != RLC_ERR) {
code = RLC_ERR;
}
} else {
core_init();
if (err_get_code() != RLC_OK) {
code = RLC_ERR;
}
core_clean();
}
}
TEST_ASSERT(code == RLC_OK, end);
core_init();
#pragma omp parallel copyin(core_ctx) shared(code)
{
if (core_get() == NULL) {
code = RLC_ERR;
}
}
TEST_ASSERT(code == RLC_OK, end);
core_clean();
} TEST_END;
#endif
#if MULTI == PTHREAD
TEST_ONCE("library context is thread-safe") {
pthread_t thread[CORES];
int result[CORES] = { RLC_OK };
for (int i = 0; i < CORES; i++) {
if (i == 0) {
if (pthread_create(&(thread[0]), NULL, master, &(result[0]))) {
code = RLC_ERR;
}
} else {
if (pthread_create(&(thread[i]), NULL, tester, &(result[i]))) {
code = RLC_ERR;
}
}
}
for (int i = 0; i < CORES; i++) {
if (pthread_join(thread[i], NULL)) {
code = RLC_ERR;
}
/* Only read a thread's result after the thread has been joined. */
if (result[i] != RLC_OK) {
code = RLC_ERR;
}
}
TEST_ASSERT(code == RLC_OK, end);
} TEST_END;
#endif
#endif
util_banner("All tests have passed.\n", 0);
end:
core_clean();
return code;
}
|
GB_convert_full_to_sparse.c | //------------------------------------------------------------------------------
// GB_convert_full_to_sparse: convert a matrix from full to sparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GB_convert_full_to_sparse // convert matrix from full to sparse
(
GrB_Matrix A, // matrix to convert from full to sparse
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A converting full to sparse", GB0) ;
ASSERT (GB_IS_FULL (A) || GB_nnz_max (A) == 0) ;
ASSERT (!GB_IS_BITMAP (A)) ;
ASSERT (!GB_IS_SPARSE (A)) ;
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
GBURBLE ("(full to sparse) ") ;
//--------------------------------------------------------------------------
// allocate A->p and A->i
//--------------------------------------------------------------------------
int64_t avdim = A->vdim ;
int64_t avlen = A->vlen ;
int64_t anz = GB_nnz_full (A) ;
int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
int64_t *restrict Ai = NULL ; size_t Ai_size = 0 ;
Ap = GB_MALLOC (avdim+1, int64_t, &Ap_size) ;
Ai = GB_MALLOC (anz, int64_t, &Ai_size) ;
if (Ap == NULL || Ai == NULL)
{
// out of memory
GB_FREE (&Ap, Ap_size) ;
GB_FREE (&Ai, Ai_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
A->p = Ap ; A->p_size = Ap_size ;
A->i = Ai ; A->i_size = Ai_size ;
A->plen = avdim ;
A->nvec = avdim ;
A->nvec_nonempty = (avlen == 0) ? 0 : avdim ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// fill the A->p and A->i pattern
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k <= avdim ; k++)
{
Ap [k] = k * avlen ;
}
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Ai [p] = p % avlen ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A converted from full to sparse", GB0) ;
ASSERT (GB_IS_SPARSE (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
return (GrB_SUCCESS) ;
}
|
GB_binop__bget_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_uint8
// A.*B function (eWiseMult): GB_AemultB__bget_uint8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bget_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_uint8
// C=scalar+B GB_bind1st__bget_uint8
// C=scalar+B' GB_bind1st_tran__bget_uint8
// C=A+scalar GB_bind2nd__bget_uint8
// C=A'+scalar GB_bind2nd_tran__bget_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITGET (x, y, uint8_t, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bget_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bget_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bget_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bget_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bget_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bget_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
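// Note on GB_BITGET: the macro is defined elsewhere in the library; judging
// from these kernels it evaluates to bit `bij` of `x` (for bind1st) as 0 or 1,
// with the type and bit-width arguments (uint8_t, 8) guarding the bit
// position. A plausible sketch of the semantics, using MATLAB-style 1-based
// bit positions (an assumption, not the library's definitive definition):
//
//     // z = bitget (x, k): returns 0 or 1; 0 if k is out of range
//     // #define GB_BITGET(x,k,type,nbits)                        \
//     //     (((k) >= 1 && (k) <= (nbits)) ?                      \
//     //         (((x) & (((type) 1) << ((k)-1))) ? 1 : 0) : 0)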
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bget_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ; \
}
GrB_Info GB_bind1st_tran__bget_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ; \
}
GrB_Info GB_bind2nd_tran__bget_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_newton_raphson_mpc_contact_strategy.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Contact criteria
#include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "utilities/atomic_utilities.h"
// // Processes
// #include "processes/fast_transfer_between_model_parts_process.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonMPCContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
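// A minimal usage sketch (illustrative only; SparseSpaceType, LocalSpaceType,
// LinearSolverType, p_scheme, p_criteria and p_linear_solver are hypothetical
// placeholders for the usual Kratos solving-strategy setup):
//
//   using StrategyType = ResidualBasedNewtonRaphsonMPCContactStrategy<
//       SparseSpaceType, LocalSpaceType, LinearSolverType>;
//   auto p_strategy = Kratos::make_shared<StrategyType>(
//       r_model_part, p_scheme, p_linear_solver, p_criteria,
//       30,      // MaxIterations
//       false,   // CalculateReactions
//       false,   // ReformDofSetAtEachStep
//       true);   // MoveMeshFlag
//   p_strategy->Initialize();
//   p_strategy->Solve();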
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonMPCContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedNewtonRaphsonMPCContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType;
typedef std::size_t IndexType;
typedef std::size_t SizeType;
/**
* @brief Default constructor
*/
explicit ResidualBasedNewtonRaphsonMPCContactStrategy()
{
}
/**
 * @brief Constructor with configuration parameters
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
explicit ResidualBasedNewtonRaphsonMPCContactStrategy(ModelPart& rModelPart, Parameters ThisParameters)
: BaseType(rModelPart)
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
 * @brief Constructor specifying the scheme, convergence criteria and builder and solver
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that enables rebuilding the DOF set at each step
 * @param MoveMeshFlag The flag that enables moving the mesh
*/
explicit ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
 * @brief Constructor specifying the scheme, linear solver and convergence criteria
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that enables rebuilding the DOF set at each step
 * @param MoveMeshFlag The flag that enables moving the mesh
*/
explicit ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
 * @brief Constructor specifying the scheme, linear solver, convergence criteria and builder and solver
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that enables rebuilding the DOF set at each step
 * @param MoveMeshFlag The flag that enables moving the mesh
*/
explicit ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* Destructor.
*/
~ResidualBasedNewtonRaphsonMPCContactStrategy() override
= default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
typename StrategyBaseType::Pointer Create(
ModelPart& rModelPart,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
}
/**
 * @brief Operation to predict the solution. If it is not called, a trivial predictor is used in which the
 * values of the solution step of interest are assumed to be equal to the old values
 */
void Predict() override
{
KRATOS_TRY
BaseType::Predict();
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// We get the system
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// We solve the system in order to check the active set once
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
// Check active set
const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria);
KRATOS_CATCH("")
}
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
KRATOS_TRY;
// Computing nodal weights
ComputeNodalWeights();
BaseType::Initialize();
KRATOS_CATCH("");
}
/**
* @brief The problem of interest is solved.
* @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
* SolveSolutionStep() and FinalizeSolutionStep().
* All those functions can otherwise be called separately.
*/
double Solve() override
{
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
this->SolveSolutionStep();
this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction
return 0.0;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
// Computing nodal weights
ComputeNodalWeights();
BaseType::InitializeSolutionStep();
// // If enforcing NTN
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
// EnforcingNTN();
// }
}
/**
* @brief Performs all the required operations that should be done (for each step)
* after solving the solution step.
*/
void FinalizeSolutionStep() override
{
KRATOS_TRY;
BaseType::FinalizeSolutionStep();
KRATOS_CATCH("");
}
/**
* @brief Solves the current step.
* @details This function returns true if a solution has been found, false otherwise.
*/
bool SolveSolutionStep() override
{
KRATOS_TRY;
bool is_converged = false;
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// We get the process info
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
if (r_process_info.Is(INTERACTION)) {
// We get the system
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
int inner_iteration = 0;
const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
++inner_iteration;
if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl;
}
// We solve one loop
r_process_info[NL_ITERATION_NUMBER] = 1;
is_converged = AuxiliarSolveSolutionStep();
// We check the convergence
if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check
is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
is_converged = AuxiliarSolveSolutionStep();
}
return is_converged;
KRATOS_CATCH("");
}
/**
 * @brief Solves the current step. This function returns true if a solution has been found, false otherwise (auxiliary method)
*/
bool AuxiliarSolveSolutionStep()
{
// Getting flag INTERACTION
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool();
VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions());
// Pointers needed in the solution
typename TSchemeType::Pointer p_scheme = this->GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// Initializing the parameters of the Newton-Raphson cycle
unsigned int iteration_number = 1;
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
// Computing nodal weights
ComputeNodalWeights();
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
// // If enforcing NTN
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
// EnforcingNTN();
// }
// Function to perform the building and the solving phase.
if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx); //Dx=0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
}
// Iteration Cycle... performed only for NonLinearProblems
while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) {
// Setting the number of iteration
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
// Computing nodal weights
ComputeNodalWeights();
// Calling InitializeNonLinIteration
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
// Shaping correctly the system
if (update_each_nl_iteration) {
p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
p_builder_and_solver->SetUpSystem(r_model_part);
p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part);
}
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
// Call the linear system solver to find the correction mDx; it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) {
if (!BaseType::GetKeepSystemConstantDuringIterations()) {
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
}
}
// Prints a warning if the maximum number of iterations is exceeded
if (iteration_number >= BaseType::mMaxIterationNumber) {
BaseType::MaxIterationsExceeded();
} else {
KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl;
}
// Recalculate residual if needed (note that some convergence criteria need it to be recalculated)
if (!residual_is_updated) {
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
}
/**
 * @brief This method returns the default parameters in order to avoid code duplication
* @return Returns the default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "newton_raphson_mpc_contact_strategy",
"inner_loop_iterations" : 5,
"update_each_nl_iteration" : false,
"enforce_ntn" : false
})" );
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
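    // For reference, a settings block overriding one of these defaults might
    // look like the following (hypothetical user code; the template arguments
    // stand in for the usual Kratos space and solver types):
    //
    //   Parameters settings(R"({
    //       "name" : "newton_raphson_mpc_contact_strategy",
    //       "inner_loop_iterations" : 10
    //   })");
    //   auto p_strategy = Kratos::make_shared<
    //       ResidualBasedNewtonRaphsonMPCContactStrategy<SparseSpaceType,
    //           LocalSpaceType, LinearSolverType>>(r_model_part, settings);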
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "newton_raphson_mpc_contact_strategy";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
// Copy the parameters
mThisParameters = ThisParameters;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
/**
* Copy constructor.
*/
ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other)
{
    }
    ///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// /**
    // * @brief This enforces the NTN formulation
// */
// void EnforcingNTN()
// {
// // List of enforced nodes to not repeat
// std::unordered_set<IndexType> enforced_nodes;
//
// // Getting contact model part
// ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart();
// ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact");
//
// // The process info
// const auto& r_process_info = r_root_model_part.GetProcessInfo();
//
// // Reset the pointers of the conditions
// for (auto& r_cond : r_computing_contact_model_part.Conditions()) {
// if (r_cond.Has(CONSTRAINT_POINTER)) {
// r_cond.SetValue(CONSTRAINT_POINTER, nullptr);
// }
// }
//
// // Iterate over the constraints
// IndexType counter = 1;
// for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
// r_const.SetId(counter);
// ++counter;
// }
//
// // Auxiliar classes
// Matrix original_relation_matrix, relation_matrix;
// Vector original_constant_vector, constant_vector;
// ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs;
//
// // Iterate over the constraints
// for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) {
// // Getting original system
// r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info);
// r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info);
//
// // TODO: Finish rebuild
//
// // Creating new constraint
// r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector);
//
// // Setting to remove the old constraints
// r_const.Set(TO_ERASE, true);
//
// ++counter;
// }
//
// // Remove old constraints
// r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
//
// // Transfer constraints from the root to the computing model part
// FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute();
//
// // Reorder ids
// counter = 1;
// for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
// r_const.SetId(counter);
// ++counter;
// }
// }
/**
* @brief This computes the nodal weights
*/
void ComputeNodalWeights()
{
// Getting contact model part
ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact");
// Reset the NODAL_PAUX and NODAL_MAUX
auto& r_nodes_array = r_contact_model_part.Nodes();
VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array);
VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array);
// We set the constraints active and inactive in function of the active set
auto& r_conditions_array = r_contact_model_part.Conditions();
auto it_cond_begin = r_conditions_array.begin();
// If enforcing NTN
const bool enforce_ntn = false;
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
// VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array);
// }
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
auto it_cond = it_cond_begin + i;
// Only slave conditions
if (it_cond->Is(SLAVE)) {
auto& r_geometry = it_cond->GetGeometry();
Vector lumping_factor;
lumping_factor = r_geometry.LumpingFactors(lumping_factor);
const double domain_size = r_geometry.DomainSize();
for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) {
auto& r_node = r_geometry[i_node];
if (!enforce_ntn) {
AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0);
}
AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size);
}
}
}
}
///@}
///@name Private Access
///@{
    ///@}
    ///@name Serialization
    ///@{
    ///@}
    ///@name Private Inquiry
    ///@{
    ///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
|
GB_binop__bshift_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint64)
// C=scalar+B GB (_bind1st__bshift_uint64)
// C=scalar+B' GB (_bind1st_tran__bshift_uint64)
// C=A+scalar GB (_bind2nd__bshift_uint64)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_uint64 (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)
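// Note on GB_bitshift_uint64: the function is defined elsewhere in the
// library. Judging from the kernels below, it computes a MATLAB-style
// bitshift: presumably z = x << k for k > 0 and z = x >> (-k) for k < 0,
// returning 0 once |k| reaches the word size. Illustrative values under that
// assumption: GB_bitshift_uint64 (1, 3) == 8 and GB_bitshift_uint64 (8, -3) == 1.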
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bshift_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bshift_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bshift_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bshift_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bshift_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bshift_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bshift_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bshift_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bshift_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bshift_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint64 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint64 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
aux_parcsr_matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_AuxParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "aux_parcsr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixCreate( hypre_AuxParCSRMatrix **aux_matrix,
HYPRE_Int local_num_rows,
HYPRE_Int local_num_cols,
HYPRE_Int *sizes )
{
hypre_AuxParCSRMatrix *matrix;
matrix = hypre_CTAlloc(hypre_AuxParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixLocalNumRows(matrix) = local_num_rows;
hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rows;
hypre_AuxParCSRMatrixLocalNumCols(matrix) = local_num_cols;
hypre_AuxParCSRMatrixRowSpace(matrix) = sizes;
/* set defaults */
hypre_AuxParCSRMatrixNeedAux(matrix) = 1;
hypre_AuxParCSRMatrixMaxOffProcElmts(matrix) = 0;
hypre_AuxParCSRMatrixCurrentOffProcElmts(matrix) = 0;
hypre_AuxParCSRMatrixOffProcIIndx(matrix) = 0;
hypre_AuxParCSRMatrixRownnz(matrix) = NULL;
hypre_AuxParCSRMatrixRowLength(matrix) = NULL;
hypre_AuxParCSRMatrixAuxJ(matrix) = NULL;
hypre_AuxParCSRMatrixAuxData(matrix) = NULL;
hypre_AuxParCSRMatrixIndxDiag(matrix) = NULL;
hypre_AuxParCSRMatrixIndxOffd(matrix) = NULL;
hypre_AuxParCSRMatrixDiagSizes(matrix) = NULL;
hypre_AuxParCSRMatrixOffdSizes(matrix) = NULL;
/* stash for setting or adding on/off-proc values */
hypre_AuxParCSRMatrixOffProcI(matrix) = NULL;
hypre_AuxParCSRMatrixOffProcJ(matrix) = NULL;
hypre_AuxParCSRMatrixOffProcData(matrix) = NULL;
hypre_AuxParCSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrixMaxStackElmts(matrix) = 0;
hypre_AuxParCSRMatrixCurrentStackElmts(matrix) = 0;
hypre_AuxParCSRMatrixStackI(matrix) = NULL;
hypre_AuxParCSRMatrixStackJ(matrix) = NULL;
hypre_AuxParCSRMatrixStackData(matrix) = NULL;
hypre_AuxParCSRMatrixStackSorA(matrix) = NULL;
hypre_AuxParCSRMatrixUsrOnProcElmts(matrix) = -1;
hypre_AuxParCSRMatrixUsrOffProcElmts(matrix) = -1;
hypre_AuxParCSRMatrixInitAllocFactor(matrix) = 5.0;
hypre_AuxParCSRMatrixGrowFactor(matrix) = 2.0;
#endif
*aux_matrix = matrix;
return hypre_error_flag;
}
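/* A minimal usage sketch (hypothetical driver code; error checking omitted).
 * Passing NULL for the sizes array makes the initializer fall back to a
 * default row space of 30 entries per row; note that Destroy frees the
 * RowSpace array, so a caller-provided sizes array should be allocated with
 * hypre_CTAlloc rather than placed on the stack:
 *
 *    hypre_AuxParCSRMatrix *aux;
 *    hypre_AuxParCSRMatrixCreate(&aux, 4, 4, NULL);
 *    hypre_AuxParCSRMatrixInitialize(aux);
 *    ... assemble rows via the IJ interface ...
 *    hypre_AuxParCSRMatrixDestroy(aux);
 */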
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixDestroy( hypre_AuxParCSRMatrix *matrix )
{
HYPRE_Int num_rownnz;
HYPRE_Int num_rows;
HYPRE_Int *rownnz;
HYPRE_Int i;
if (matrix)
{
rownnz = hypre_AuxParCSRMatrixRownnz(matrix);
num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(matrix);
num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
if (hypre_AuxParCSRMatrixAuxJ(matrix))
{
if (hypre_AuxParCSRMatrixRownnz(matrix))
{
for (i = 0; i < num_rownnz; i++)
{
hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz[i]], HYPRE_MEMORY_HOST);
}
}
else
{
for (i = 0; i < num_rows; i++)
{
hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[i], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix), HYPRE_MEMORY_HOST);
}
if (hypre_AuxParCSRMatrixAuxData(matrix))
{
if (hypre_AuxParCSRMatrixRownnz(matrix))
{
for (i = 0; i < num_rownnz; i++)
{
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz[i]], HYPRE_MEMORY_HOST);
}
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix), HYPRE_MEMORY_HOST);
}
else
{
for (i = 0; i < num_rows; i++)
{
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[i], HYPRE_MEMORY_HOST);
}
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix), HYPRE_MEMORY_HOST);
}
}
hypre_TFree(hypre_AuxParCSRMatrixRownnz(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixRowLength(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixRowSpace(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixIndxDiag(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixIndxOffd(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixDiagSizes(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixOffdSizes(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixOffProcI(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixOffProcJ(matrix), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixOffProcData(matrix), HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_TFree(hypre_AuxParCSRMatrixStackI(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
hypre_TFree(hypre_AuxParCSRMatrixStackJ(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
hypre_TFree(hypre_AuxParCSRMatrixStackData(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
hypre_TFree(hypre_AuxParCSRMatrixStackSorA(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
#endif
hypre_TFree(matrix, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixSetRownnz
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixSetRownnz( hypre_AuxParCSRMatrix *matrix )
{
HYPRE_Int local_num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
HYPRE_Int *row_space = hypre_AuxParCSRMatrixRowSpace(matrix);
HYPRE_Int num_rownnz_old = hypre_AuxParCSRMatrixLocalNumRownnz(matrix);
HYPRE_Int *rownnz_old = hypre_AuxParCSRMatrixRownnz(matrix);
HYPRE_Int *rownnz;
HYPRE_Int i, ii, local_num_rownnz;
/* Count number of nonzero rows */
local_num_rownnz = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:local_num_rownnz) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
if (row_space[i] > 0)
{
local_num_rownnz++;
}
}
if (local_num_rownnz != local_num_rows)
{
rownnz = hypre_CTAlloc(HYPRE_Int, local_num_rownnz, HYPRE_MEMORY_HOST);
/* Find nonzero rows */
local_num_rownnz = 0;
for (i = 0; i < local_num_rows; i++)
{
if (row_space[i] > 0)
{
rownnz[local_num_rownnz++] = i;
}
}
/* Free memory if necessary */
if (rownnz_old && rownnz && (local_num_rownnz < num_rownnz_old))
{
ii = 0;
for (i = 0; i < num_rownnz_old; i++)
{
if (rownnz_old[i] == rownnz[ii])
{
ii++;
}
else
{
hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz_old[i]], HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz_old[i]], HYPRE_MEMORY_HOST);
}
if (ii == local_num_rownnz)
{
i = i + 1;
for (; i < num_rownnz_old; i++)
{
hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz_old[i]],
HYPRE_MEMORY_HOST);
hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz_old[i]],
HYPRE_MEMORY_HOST);
}
break;
}
}
}
hypre_TFree(rownnz_old, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rownnz;
hypre_AuxParCSRMatrixRownnz(matrix) = rownnz;
}
else
{
hypre_TFree(rownnz_old, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rows;
hypre_AuxParCSRMatrixRownnz(matrix) = NULL;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixInitialize_v2
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixInitialize_v2( hypre_AuxParCSRMatrix *matrix,
HYPRE_MemoryLocation memory_location )
{
HYPRE_Int local_num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
HYPRE_Int max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(matrix);
hypre_AuxParCSRMatrixMemoryLocation(matrix) = memory_location;
if (local_num_rows < 0)
{
return -1;
}
if (local_num_rows == 0)
{
return 0;
}
if (memory_location != HYPRE_MEMORY_HOST)
{
/* GPU assembly */
hypre_AuxParCSRMatrixNeedAux(matrix) = 1;
}
else
{
/* CPU assembly */
/* allocate stash for setting or adding off processor values */
if (max_off_proc_elmts > 0)
{
hypre_AuxParCSRMatrixOffProcI(matrix) = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
}
if (hypre_AuxParCSRMatrixNeedAux(matrix))
{
HYPRE_Int *row_space = hypre_AuxParCSRMatrixRowSpace(matrix);
HYPRE_Int *rownnz = hypre_AuxParCSRMatrixRownnz(matrix);
HYPRE_BigInt **aux_j = hypre_CTAlloc(HYPRE_BigInt *, local_num_rows, HYPRE_MEMORY_HOST);
HYPRE_Complex **aux_data = hypre_CTAlloc(HYPRE_Complex *, local_num_rows, HYPRE_MEMORY_HOST);
HYPRE_Int local_num_rownnz;
HYPRE_Int i, ii;
if (row_space)
{
/* Count number of nonzero rows */
local_num_rownnz = 0;
for (i = 0; i < local_num_rows; i++)
{
if (row_space[i] > 0)
{
local_num_rownnz++;
}
}
if (local_num_rownnz != local_num_rows)
{
rownnz = hypre_CTAlloc(HYPRE_Int, local_num_rownnz, HYPRE_MEMORY_HOST);
/* Find nonzero rows */
local_num_rownnz = 0;
for (i = 0; i < local_num_rows; i++)
{
if (row_space[i] > 0)
{
rownnz[local_num_rownnz++] = i;
}
}
hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rownnz;
hypre_AuxParCSRMatrixRownnz(matrix) = rownnz;
}
}
if (!hypre_AuxParCSRMatrixRowLength(matrix))
{
hypre_AuxParCSRMatrixRowLength(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
if (row_space)
{
if (local_num_rownnz != local_num_rows)
{
for (i = 0; i < local_num_rownnz; i++)
{
ii = rownnz[i];
aux_j[ii] = hypre_CTAlloc(HYPRE_BigInt, row_space[ii], HYPRE_MEMORY_HOST);
aux_data[ii] = hypre_CTAlloc(HYPRE_Complex, row_space[ii], HYPRE_MEMORY_HOST);
}
}
else
{
for (i = 0; i < local_num_rows; i++)
{
aux_j[i] = hypre_CTAlloc(HYPRE_BigInt, row_space[i], HYPRE_MEMORY_HOST);
aux_data[i] = hypre_CTAlloc(HYPRE_Complex, row_space[i], HYPRE_MEMORY_HOST);
}
}
}
else
{
row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
for (i = 0; i < local_num_rows; i++)
{
row_space[i] = 30;
aux_j[i] = hypre_CTAlloc(HYPRE_BigInt, 30, HYPRE_MEMORY_HOST);
aux_data[i] = hypre_CTAlloc(HYPRE_Complex, 30, HYPRE_MEMORY_HOST);
}
hypre_AuxParCSRMatrixRowSpace(matrix) = row_space;
}
hypre_AuxParCSRMatrixAuxJ(matrix) = aux_j;
hypre_AuxParCSRMatrixAuxData(matrix) = aux_data;
}
else
{
hypre_AuxParCSRMatrixIndxDiag(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixIndxOffd(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
HYPRE_Int
hypre_AuxParCSRMatrixInitialize(hypre_AuxParCSRMatrix *matrix)
{
if (matrix)
{
return hypre_AuxParCSRMatrixInitialize_v2(matrix, hypre_AuxParCSRMatrixMemoryLocation(matrix));
}
return -2;
}
|
stream.c | /* This is a modified version of stream */
/*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
# include <unistd.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
 *       Example: most versions of Windows have a 10 millisecond timer
 *       granularity.  20 "ticks" at 10 ms/tick is 200 milliseconds.
 *       If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
 *       This means that each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 10000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
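/* (Editorial note: with STREAM_TYPE double, OFFSET=56 staggers successive
 * arrays by an extra 448 bytes, which can help avoid cache-set and TLB
 * conflicts when STREAM_ARRAY_SIZE is close to a large power of 2.) */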
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
/*#define USESTACK*/
#ifdef USESTACK
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
#else
#include <stdlib.h>
STREAM_TYPE* a;
STREAM_TYPE* b;
STREAM_TYPE* c;
#endif
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
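/* bytes[] above encodes the data traffic per kernel pass:
 * Copy (c=a) and Scale (b=s*c) read one array and write one -> 2*N elements;
 * Add (c=a+b) and Triad (a=b+s*c) read two and write one -> 3*N elements. */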
extern double mysecond();
extern void checkSTREAMresults();
#ifdef __SSE3__
#include <immintrin.h>
#include <omp.h>
#define TUNED
#endif
#ifdef BENCH_POWER8
#include <altivec.h>
#define TUNED
#endif
#ifdef BENCH_ARMV8
#define TUNED
#endif
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
#ifndef USESTACK
    /* allocate on 2 MiB boundaries; sized in STREAM_TYPE elements, including OFFSET */
    if (posix_memalign((void**)&a, 2097152, ((size_t)STREAM_ARRAY_SIZE+OFFSET)*sizeof(STREAM_TYPE)) != 0 ||
        posix_memalign((void**)&b, 2097152, ((size_t)STREAM_ARRAY_SIZE+OFFSET)*sizeof(STREAM_TYPE)) != 0 ||
        posix_memalign((void**)&c, 2097152, ((size_t)STREAM_ARRAY_SIZE+OFFSET)*sizeof(STREAM_TYPE)) != 0) {
        printf("posix_memalign failed to allocate the benchmark arrays. Exiting...\n");
        return -1;
    }
#endif
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
    k = 0;
#pragma omp parallel
#pragma omp atomic
    k++;
    printf ("Number of Threads counted = %i\n",k);
#else
    k = 1; /* serial build: one "thread", so the check below stays well defined */
#endif
    /* check the array size for simple load balancing */
    if ( STREAM_ARRAY_SIZE % (k*512) != 0 ) {
        printf("Please define STREAM_ARRAY_SIZE such that it is divisible by number_of_threads*512!\n Exiting...\n");
        return -1;
    }
    /* Initialize the arrays. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = b[j]+scalar*c[j];
}
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
char hostname[255];
gethostname(hostname, 255);
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s %s%12.1f %11.6f %11.6f %11.6f\n", hostname, label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
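/* (Editorial example: if mysecond() advances in 1 microsecond steps, every
 * sampled delta above is >= 1, so checktick() returns 1 -- i.e. an estimated
 * clock granularity of 1 microsecond.) */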
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
        (void) gettimeofday(&tp,&tzp); /* return value intentionally ignored */
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
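/* Editorial sketch (not part of the original STREAM code): where available,
 * clock_gettime(CLOCK_MONOTONIC) offers nanosecond resolution and is immune
 * to wall-clock adjustments. A drop-in alternative could look like the
 * following (disabled by default; the name mysecond_monotonic is illustrative): */
#if 0
#include <time.h>
double mysecond_monotonic()
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);	/* monotonic, ns resolution */
	return ( (double) ts.tv_sec + (double) ts.tv_nsec * 1.e-9 );
}
#endif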
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
#pragma omp parallel
{
ssize_t j;
#ifdef _OPENMP
ssize_t chunk = STREAM_ARRAY_SIZE/omp_get_num_threads();
ssize_t start = chunk*omp_get_thread_num();
#else
ssize_t chunk = STREAM_ARRAY_SIZE;
ssize_t start = 0;
#endif
#ifdef BENCH_AVX512
for (j=start; j< start+chunk; j+=8)
_mm512_stream_pd(&c[j], _mm512_load_pd(&a[j]));
#endif
#ifdef BENCH_AVX
for (j=start; j< start+chunk; j+=4)
_mm256_stream_pd(&c[j], _mm256_load_pd(&a[j]));
#endif
#ifdef BENCH_SSE
for (j=start; j< start+chunk; j+=2)
_mm_stream_pd(&c[j], _mm_load_pd(&a[j]));
#endif
#ifdef BENCH_POWER8
for (j=start; j< start+chunk; j+=2)
vec_vsx_st(vec_vsx_ld(0, &a[j]), 0, &c[j]);
#endif
#ifdef BENCH_ARMV8
__asm__ __volatile__("mov x0, %0\n\t" // a
"mov x1, %1\n\t" // c
"mov x4, %2\n\t" // chunk
"1:\n\t"
"ldr d0, [x0]\n\t"
"ldr d1, [x0,8]\n\t"
"stnp d0, d1, [x1]\n\t"
"ldr d2, [x0,16]\n\t"
"ldr d3, [x0,24]\n\t"
"stnp d2, d3, [x1,16]\n\t"
"add x0, x0, #32\n\t"
"add x1, x1, #32\n\t"
"sub x4, x4, #4\n\t"
"cbnz x4, 1b\n\t"
: : "r" (&a[start]), "r" (&c[start]), "r" (chunk) : "x0","x1","x4","d0","d1","d2","d3");
#endif
#ifndef BENCH_AVX512
#ifndef BENCH_AVX2
#ifndef BENCH_AVX
#ifndef BENCH_SSE
#ifndef BENCH_POWER8
#ifndef BENCH_ARMV8
for (j=start; j< start+chunk; j++)
c[j] = a[j];
#endif
#endif
#endif
#endif
#endif
#endif
}
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
#ifdef BENCH_AVX512
__m512d vecscalar = _mm512_set1_pd(scalar);
#endif
#ifdef BENCH_AVX
__m256d vecscalar = _mm256_set1_pd(scalar);
#endif
#ifdef BENCH_SSE
__m128d vecscalar = _mm_set1_pd(scalar);
#endif
#ifdef BENCH_POWER8
__attribute__((aligned(128))) STREAM_TYPE pumped_scalar[2] = {scalar, scalar};
vector double vecscalar = vec_vsx_ld(0, pumped_scalar);
#endif
#pragma omp parallel
{
ssize_t j;
#ifdef _OPENMP
ssize_t chunk = STREAM_ARRAY_SIZE/omp_get_num_threads();
ssize_t start = chunk*omp_get_thread_num();
#else
ssize_t chunk = STREAM_ARRAY_SIZE;
ssize_t start = 0;
#endif
#ifdef BENCH_AVX512
for (j=start; j< start+chunk; j+=8)
_mm512_stream_pd(&b[j], _mm512_mul_pd(vecscalar, _mm512_load_pd(&c[j])));
#endif
#ifdef BENCH_AVX
for (j=start; j< start+chunk; j+=4)
_mm256_stream_pd(&b[j], _mm256_mul_pd(vecscalar, _mm256_load_pd(&c[j])));
#endif
#ifdef BENCH_SSE
for (j=start; j< start+chunk; j+=2)
_mm_stream_pd(&b[j], _mm_mul_pd(vecscalar, _mm_load_pd(&c[j])));
#endif
#ifdef BENCH_POWER8
for (j=start; j< start+chunk; j+=2)
vec_vsx_st(vec_mul(vecscalar, vec_vsx_ld(0, &c[j])), 0, &b[j]);
#endif
#ifdef BENCH_ARMV8
__asm__ __volatile__("mov x0, %0\n\t" // b
"mov x1, %1\n\t" // c
"mov x3, %2\n\t" // scalar
"mov x4, %3\n\t" // chunk
"ldr d6, [x3]\n\t"
"1:\n\t"
"ldr d0, [x1]\n\t"
"ldr d1, [x1,8]\n\t"
"fmul d0, d0, d6\n\t"
"fmul d1, d1, d6\n\t"
"stnp d0, d1, [x0]\n\t"
"ldr d2, [x1,16]\n\t"
"ldr d3, [x1,24]\n\t"
"fmul d2, d2, d6\n\t"
"fmul d3, d3, d6\n\t"
"stnp d2, d3, [x0,16]\n\t"
"add x0, x0, #32\n\t"
"add x1, x1, #32\n\t"
"sub x4, x4, #4\n\t"
"cbnz x4, 1b\n\t"
: : "r" (&b[start]), "r" (&c[start]), "r" (&scalar), "r" (chunk) : "x0","x1","x3","x4","d0","d1","d2","d3","d6");
#endif
#ifndef BENCH_AVX512
#ifndef BENCH_AVX2
#ifndef BENCH_AVX
#ifndef BENCH_SSE
#ifndef BENCH_POWER8
#ifndef BENCH_ARMV8
for (j=start; j< start+chunk; j++)
b[j] = scalar * c[j];
#endif
#endif
#endif
#endif
#endif
#endif
}
}
void tuned_STREAM_Add()
{
#pragma omp parallel
{
ssize_t j;
#ifdef _OPENMP
ssize_t chunk = STREAM_ARRAY_SIZE/omp_get_num_threads();
ssize_t start = chunk*omp_get_thread_num();
#else
ssize_t chunk = STREAM_ARRAY_SIZE;
ssize_t start = 0;
#endif
#ifdef BENCH_AVX512
for (j=start; j< start+chunk; j+=8)
_mm512_stream_pd(&c[j], _mm512_add_pd(_mm512_load_pd(&a[j]), _mm512_load_pd(&b[j])));
#endif
#ifdef BENCH_AVX
for (j=start; j< start+chunk; j+=4)
_mm256_stream_pd(&c[j], _mm256_add_pd(_mm256_load_pd(&a[j]), _mm256_load_pd(&b[j])));
#endif
#ifdef BENCH_SSE
for (j=start; j< start+chunk; j+=2)
_mm_stream_pd(&c[j], _mm_add_pd(_mm_load_pd(&a[j]), _mm_load_pd(&b[j])));
#endif
#ifdef BENCH_POWER8
for (j=start; j< start+chunk; j+=2)
vec_vsx_st(vec_add(vec_vsx_ld(0, &a[j]), vec_vsx_ld(0, &b[j])), 0, &c[j]);
#endif
#ifdef BENCH_ARMV8
__asm__ __volatile__("mov x0, %0\n\t" // a
"mov x1, %1\n\t" // b
"mov x2, %2\n\t" // c
"mov x4, %3\n\t" // chunk
"1:\n\t"
"ldr d0, [x0]\n\t"
"ldr d1, [x1]\n\t"
"fadd d0, d1, d0\n\t"
"ldr d2, [x0,8]\n\t"
"ldr d3, [x1,8]\n\t"
"fadd d2, d3, d2\n\t"
"stnp d0, d2, [x2]\n\t"
"ldr d4, [x0,16]\n\t"
"ldr d5, [x1,16]\n\t"
"fadd d4, d5, d4\n\t"
"ldr d7, [x0,24]\n\t"
"ldr d8, [x1,24]\n\t"
"fadd d7, d8, d7\n\t"
"stnp d4, d7, [x2,16]\n\t"
"add x0, x0, #32\n\t"
"add x1, x1, #32\n\t"
"add x2, x2, #32\n\t"
"sub x4, x4, #4\n\t"
"cbnz x4, 1b\n\t"
: : "r" (&a[start]), "r" (&b[start]), "r" (&c[start]), "r" (chunk) : "x0","x1","x2","x3","x4","d0","d1","d2","d3","d4","d5","d6","d7","d8");
#endif
#ifndef BENCH_AVX512
#ifndef BENCH_AVX2
#ifndef BENCH_AVX
#ifndef BENCH_SSE
#ifndef BENCH_POWER8
#ifndef BENCH_ARMV8
for (j=start; j< start+chunk; j++)
c[j] = a[j] + b[j];
#endif
#endif
#endif
#endif
#endif
#endif
}
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
#ifdef BENCH_AVX512
__m512d vecscalar = _mm512_set1_pd(scalar);
#endif
#ifdef BENCH_AVX
__m256d vecscalar = _mm256_set1_pd(scalar);
#endif
#ifdef BENCH_SSE
__m128d vecscalar = _mm_set1_pd(scalar);
#endif
#ifdef BENCH_POWER8
__attribute__((aligned(128))) STREAM_TYPE pumped_scalar[2] = {scalar, scalar};
vector double vecscalar = vec_vsx_ld(0, pumped_scalar);
#endif
#pragma omp parallel
{
ssize_t j;
#ifdef _OPENMP
ssize_t chunk = STREAM_ARRAY_SIZE/omp_get_num_threads();
ssize_t start = chunk*omp_get_thread_num();
#else
ssize_t chunk = STREAM_ARRAY_SIZE;
ssize_t start = 0;
#endif
#ifdef BENCH_AVX512
for (j=start; j< start+chunk; j+=8)
_mm512_stream_pd(&a[j], _mm512_add_pd(_mm512_load_pd(&b[j]), _mm512_mul_pd(vecscalar, _mm512_load_pd(&c[j])) ) );
#endif
#ifdef BENCH_AVX
for (j=start; j< start+chunk; j+=4)
_mm256_stream_pd(&a[j], _mm256_add_pd(_mm256_load_pd(&b[j]), _mm256_mul_pd(vecscalar, _mm256_load_pd(&c[j])) ) );
#endif
#ifdef BENCH_SSE
for (j=start; j< start+chunk; j+=2)
_mm_stream_pd(&a[j], _mm_add_pd(_mm_load_pd(&b[j]), _mm_mul_pd(vecscalar, _mm_load_pd(&c[j])) ) );
#endif
#ifdef BENCH_POWER8
for (j=start; j< start+chunk; j+=2)
vec_vsx_st(vec_madd(vec_vsx_ld(0, &c[j]), vecscalar, vec_vsx_ld(0, &b[j])), 0, &a[j]);
#endif
#ifdef BENCH_ARMV8
__asm__ __volatile__("mov x0, %0\n\t" // a
"mov x1, %1\n\t" // b
"mov x2, %2\n\t" // c
"mov x3, %3\n\t" // scalar
"mov x4, %4\n\t" // chunk
"ldr d6, [x3]\n\t"
"1:\n\t"
"ldr d0, [x1]\n\t"
"ldr d1, [x2]\n\t"
"fmadd d0, d6, d1, d0\n\t"
"ldr d2, [x1,8]\n\t"
"ldr d3, [x2,8]\n\t"
"fmadd d2, d6, d3, d2\n\t"
"stnp d0, d2, [x0]\n\t"
"ldr d4, [x1,16]\n\t"
"ldr d5, [x2,16]\n\t"
"fmadd d4, d6, d5, d4\n\t"
"ldr d7, [x1,24]\n\t"
"ldr d8, [x2,24]\n\t"
"fmadd d7, d6, d8, d7\n\t"
"stnp d4, d7, [x0,16]\n\t"
"add x0, x0, #32\n\t"
"add x1, x1, #32\n\t"
"add x2, x2, #32\n\t"
"sub x4, x4, #4\n\t"
"cbnz x4, 1b\n\t"
: : "r" (&a[start]), "r" (&b[start]), "r" (&c[start]), "r" (&scalar), "r" (chunk) : "x0","x1","x2","x3","x4","d0","d1","d2","d3","d4","d5","d6","d7","d8");
#endif
#ifndef BENCH_AVX512
#ifndef BENCH_AVX2
#ifndef BENCH_AVX
#ifndef BENCH_SSE
#ifndef BENCH_POWER8
#ifndef BENCH_ARMV8
for (j=start; j< start+chunk; j++)
a[j] = b[j] + (scalar * c[j]);
#endif
#endif
#endif
#endif
#endif
#endif
}
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
fracstep_GLS_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Author Julio Marti.
//
#if !defined(KRATOS_GLS_STRATEGY)
#define KRATOS_GLS_STRATEGY
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "utilities/geometry_utilities.h"
#include "pfem_2_application_variables.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/implicit_solving_strategy.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
//#include "custom_utilities/solver_settings.h"
#ifdef _OPENMP
#include "omp.h"
#endif
#define QCOMP
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/// Short class definition.
/** Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class FracStepStrategy
: public ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
/**@name Type Definitions */
/*@{ */
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( FracStepStrategy );
typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef OpenMPUtils::PartitionVector PartitionVector;
/*@} */
/**@name Life Cycle
*/
/*@{ */
    /**
     * Constructor of the FracStepStrategy. Implements the solution strategy for a Navier-Stokes solver
     * using the fractional step approach. Prepared for both OpenMP and MPI parallelism.
     * @param model_part - contains Nodes, elements, etc.
     * @param pNewVelocityLinearSolver - linear solver used for the fractional velocity step
     * @param pNewPressureLinearSolver - linear solver used for the pressure step
     * @param ReformDofAtEachIteration - if set to true the graph of the matrix is recomputed at each iteration
     * @param velocity_toll - tolerance used in the velocity convergence check
     * @param pressure_toll - pressure tolerance in finalizing the predictor corrector strategy
     * @param MaxVelocityIterations - maximum number of iterations of the velocity solver
     * @param MaxPressureIterations - maximum number of iterations of the predictor corrector strategy
     * @param time_order - 1=BDF1, 2=BDF2
     * @param domain_size - 2=2D, 3=3D
     * @param predictor_corrector - true->predictor corrector, false->standard fractional step (default = false)
     */
FracStepStrategy( ModelPart& model_part, typename TLinearSolver::Pointer pNewVelocityLinearSolver,typename TLinearSolver::Pointer pNewPressureLinearSolver,
bool ReformDofAtEachIteration = true,
double velocity_toll = 0.01,
double pressure_toll = 0.01,
int MaxVelocityIterations = 3,
int MaxPressureIterations = 1,
unsigned int time_order = 2,
unsigned int domain_size = 2,
bool predictor_corrector = false
)
: ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(model_part, false)//, msolver_config(solver_config)
{
KRATOS_TRY
this->mvelocity_toll = velocity_toll;
this->mpressure_toll = pressure_toll;
this->mMaxVelIterations = MaxVelocityIterations;
this->mMaxPressIterations = MaxPressureIterations;
this->mtime_order = time_order;
this->mprediction_order = time_order;
this->mdomain_size = domain_size;
this->mpredictor_corrector = predictor_corrector;
this->mReformDofAtEachIteration = ReformDofAtEachIteration;
this->proj_is_initialized = false;
this->mecho_level = 1;
        bool CalculateReactions = false;
        bool CalculateNormDxFlag = true;
        // local flag for the internal linear strategies (renamed so it does not
        // shadow the constructor argument ReformDofAtEachIteration)
        bool ReformDofSetAtEachStep = false;
//computation of the fractional vel velocity (first step)
//3 dimensional case
//typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent;
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typename SchemeType::Pointer pscheme = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ());
// BuilderSolverTypePointer pVelocityBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewVelocityLinearSolver));
BuilderSolverTypePointer pVelocityBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewVelocityLinearSolver));
        this->mpfracvel_strategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (model_part, pscheme, pVelocityBuildAndSolver, CalculateReactions, ReformDofSetAtEachStep, CalculateNormDxFlag));
this->mpfracvel_strategy->SetEchoLevel(1);
// BuilderSolverTypePointer pPressureBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewVelocityLinearSolver));
BuilderSolverTypePointer pPressureBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewPressureLinearSolver));
// this->mppressurestep = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (model_part, pscheme,pNewPressureLinearSolver, pressure_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag));
        this->mppressurestep = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (model_part, pscheme, pPressureBuildAndSolver, CalculateReactions, ReformDofSetAtEachStep, CalculateNormDxFlag));
this->mppressurestep->SetEchoLevel(2);
this->m_step = 1;
mHasSlipProcess = false;
KRATOS_CATCH("")
}
/** Destructor.
*/
virtual ~FracStepStrategy()
{
}
    /** Performs one global solution step of the fractional step scheme.
     */
double Solve() override
{
KRATOS_TRY
Timer time;
Timer::Start("Solve_strategy");
#if defined(QCOMP)
double Dp_norm;
Dp_norm = IterativeSolve();
#else
//multifluids
AssignInitialStepValues();
double Dp_norm = 1.00;
//int iteration = 0;
//int MaxPressureIterations = this->mMaxPressIterations;
Dp_norm = IterativeSolve();
#endif
//this->Clear();
        this->m_step += 1;
        Timer::Stop("Solve_strategy");
        return Dp_norm;
KRATOS_CATCH("")
}
double SolvePressure()
{
KRATOS_TRY
//ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
this->SolveStep7(); //pold=pn+1
double Dp_norm = this->SolveStep2();
return Dp_norm;
KRATOS_CATCH("")
}
double IterativeSolve()
{
KRATOS_TRY
Timer time;
Timer::Start("Solve_ambos");
double Dp_norm = 1.00;
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
//KRATOS_THROW_ERROR(std::logic_error, "method not implemented" , "");
rCurrentProcessInfo[VISCOSITY] = 1.0;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
i->FastGetSolutionStepValue(VELOCITY_X,1) = i->FastGetSolutionStepValue(VELOCITY_X);
i->FastGetSolutionStepValue(VELOCITY_Y,1) = i->FastGetSolutionStepValue(VELOCITY_Y);
i->FastGetSolutionStepValue(VELOCITY_Z,1) = i->FastGetSolutionStepValue(VELOCITY_Z);
}
#if defined(QCOMP)
this->SolveStep1(this->mvelocity_toll, this->mMaxVelIterations);
#else
this->SolveStep3();
#endif
//double p_norm=0.0;
#if defined(QCOMP)
//polimero
this->SolveStepaux();
//int MaxPressureIterations = this->mMaxPressIterations;
//int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
//double p_norm = SavePressureIteration();
Dp_norm = 1.0;
//Timer::Stop("Solve_ambos");
//KRATOS_WATCH(time)
#else
int iteration = 0;
while ( iteration++ < 3)
{
Dp_norm = SolvePressure();
double p_norm = SavePressureIteration();
if (fabs(p_norm) > 1e-10){
Dp_norm /= p_norm;
}
else
Dp_norm = 1.0;
this->SolveStep4();
}
#endif
        this->Clear();
        Timer::Stop("Solve_ambos");
        return Dp_norm;
KRATOS_CATCH("")
}
/**
* copies PRESSURE->PRESSURE_OLD_IT
* @return the norm of the pressure vector
*/
double SavePressureIteration()
{
KRATOS_TRY
double local_p_norm = 0.0;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();
i != BaseType::GetModelPart().NodesEnd(); ++i)
{
//setting the old value of the pressure to the current one
const double& p = (i)->FastGetSolutionStepValue(PRESSURE);
local_p_norm += p*p;
}
double p_norm = local_p_norm;
//TODO: prepare for parallelization
p_norm = sqrt(p_norm);
return p_norm;
KRATOS_CATCH("")
}
void AssignInitialStepValues()
{
KRATOS_TRY
ModelPart& model_part=BaseType::GetModelPart();
const double dt = model_part.GetProcessInfo()[DELTA_TIME];
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
(i)->FastGetSolutionStepValue(PRESSURE_OLD_IT) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE,1) = 0.0;
}
KRATOS_CATCH("");
}
/**
* this function performs the iterative solution of the non-linear velocity problem in the first step
* of the fractional step procedure
* @param velocity_toll - tolerance used in the velocity convergence check
* @param MaxIterations - max number of iterations
*/
void SolveStep1(double velocity_toll, int MaxIterations)
{
KRATOS_TRY;
Timer time;
Timer::Start("SolveStep1");
int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
double normDx = 0.0;
bool is_converged = false;
int iteration = 0;
//double iteration = 1;
//ModelPart& model_part=BaseType::GetModelPart();
while (is_converged == false && iteration++<3)
{
//perform one iteration over the fractional step velocity
normDx = FractionalVelocityIteration();
is_converged = ConvergenceCheck(normDx, velocity_toll);
}
if (is_converged == false)
if (rank == 0) std::cout << "ATTENTION: convergence NOT achieved" << std::endl;
KRATOS_CATCH("");
}
double FractionalVelocityIteration()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
double normDx = mpfracvel_strategy->Solve();
return normDx;
KRATOS_CATCH("");
}
void SolveStep4()
{
KRATOS_TRY;
Timer time;
Timer::Start("paso_4");
array_1d<double, 3 > zero = ZeroVector(3);
//#ifdef _OPENMP
// int number_of_threads = omp_get_max_threads();
//#else
// int number_of_threads = 1;
//#endif
//ModelPart& model_part=BaseType::GetModelPart();
//double dt = model_part.GetProcessInfo()[DELTA_TIME];
//dt=0.005;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
array_1d<double, 3 > zero = ZeroVector(3);
i->FastGetSolutionStepValue(FORCE)=ZeroVector(3);
double & nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
nodal_mass = 0.0;
}
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 6;
for (ModelPart::ElementIterator i = BaseType::GetModelPart().ElementsBegin(); i != BaseType::GetModelPart().ElementsEnd(); ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
array_1d<double,3>& force_temp = i->FastGetSolutionStepValue(FORCE);
double A = (i)->FastGetSolutionStepValue(NODAL_MASS);
if(A<0.0000000000000001){
A=1.0;
}
            double dt_Minv = 0.005 / A; // NOTE: the time step is hard-coded to 0.005 here (DELTA_TIME retrieval is commented out above)
//dt_Minv=1.0;
force_temp *= dt_Minv;
//KRATOS_WATCH(force_temp);
if(!i->IsFixed(VELOCITY_X)) //FRACT_VEL_X
{
i->FastGetSolutionStepValue(VELOCITY_X) += force_temp[0] ;
}
if(!i->IsFixed(VELOCITY_Y))
{
i->FastGetSolutionStepValue(VELOCITY_Y) +=force_temp[1];
}
if(!i->IsFixed(VELOCITY_Z))
{
i->FastGetSolutionStepValue(VELOCITY_Z) +=force_temp[2] ;
}
if(i->IsFixed(VELOCITY_X))
{
i->FastGetSolutionStepValue(VELOCITY_X)=0.0; //i->FastGetSolutionStepValue(VELOCITY_X,1);
}
if(i->IsFixed(VELOCITY_Y))
{
i->FastGetSolutionStepValue(VELOCITY_Y)= 0.0; //i->FastGetSolutionStepValue(VELOCITY_Y,1) ;
}
if(i->IsFixed(VELOCITY_Z))
{
i->FastGetSolutionStepValue(VELOCITY_Z)=0.0; //i->FastGetSolutionStepValue(VELOCITY_Z,1) ;
}
}
KRATOS_CATCH("");
}
void SolveStep7()
{
KRATOS_TRY;
// ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
array_1d<double, 3 > zero = ZeroVector(3);
//Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
//ModelPart& model_part=BaseType::GetModelPart();
//const double dt = model_part.GetProcessInfo()[DELTA_TIME];
vector<unsigned int> partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
for (typename ModelPart::NodesContainerType::iterator it=it_begin; it!=it_end; ++it)
{
it->FastGetSolutionStepValue(PRESSURE_OLD_IT)=it->FastGetSolutionStepValue(PRESSURE);
}
}
KRATOS_CATCH("");
}
    double SolveStep2()
    {
        KRATOS_TRY;
        Timer::Start("Presion");
        BaseType::GetModelPart().GetProcessInfo()[FRACTIONAL_STEP] = 4;
        double Dp_norm = mppressurestep->Solve();
        Timer::Stop("Presion");
        return Dp_norm;
        KRATOS_CATCH("");
    }
void SolveStep3()
{
KRATOS_TRY
ModelPart& model_part=BaseType::GetModelPart();
const double dt = model_part.GetProcessInfo()[DELTA_TIME];
Timer time;
Timer::Start("paso_3");
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
double & nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
nodal_mass = 0.0;
noalias(i->FastGetSolutionStepValue(FORCE)) = zero;
//double & nodal_area = (i)->FastGetSolutionStepValue(NODAL_AREA);
//nodal_area = 0.0;
}
}
array_1d<double,3> zero = ZeroVector(3);
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 5;
vector<unsigned int> elem_partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
}
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
array_1d<double,3>& force_temp = i->FastGetSolutionStepValue(FORCE);
force_temp *=(1.0/ i->FastGetSolutionStepValue(NODAL_MASS));
//array_1d<double,3>& vel = i->FastGetSolutionStepValue(VELOCITY);
i->FastGetSolutionStepValue(VELOCITY) = i->FastGetSolutionStepValue(VELOCITY,1) + dt * force_temp;
}
}
KRATOS_CATCH("");
}
void SolveStepaux()
{
KRATOS_TRY
Timer time;
Timer::Start("SolveStepaux");
//ModelPart& model_part=BaseType::GetModelPart();
//const double dt = model_part.GetProcessInfo()[DELTA_TIME];
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
//number_of_threads = 1;
vector<unsigned int> partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
double & nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
nodal_mass = 0.0;
i->FastGetSolutionStepValue(PRESSUREAUX)=0.0;
i->FastGetSolutionStepValue(PRESSURE)=0.0;
}
}
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 7;
vector<unsigned int> elem_partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
}
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
if(i->FastGetSolutionStepValue(NODAL_MASS)==0.0)
{
i->FastGetSolutionStepValue(PRESSURE)=0.0;
}
else
{
//if()
i->FastGetSolutionStepValue(PRESSURE)=i->FastGetSolutionStepValue(PRESSUREAUX) * (1.0/ i->FastGetSolutionStepValue(NODAL_MASS));
}
}
}
KRATOS_CATCH("");
}
/**
* implements the convergence check for the velocities
* convergence is considered achieved when normDx/norm(v) is less than tol
* @param normDx norm of the VELOCITY correction
* @param toll tolerance accepted
* @return true if converged
*/
bool ConvergenceCheck(const double& normDx, double tol)
{
KRATOS_TRY;
double norm_v = 0.00;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();
i != BaseType::GetModelPart().NodesEnd(); ++i)
{
const array_1d<double, 3 > & v = (i)->FastGetSolutionStepValue(VELOCITY);
norm_v += v[0] * v[0];
norm_v += v[1] * v[1];
norm_v += v[2] * v[2];
}
//BaseType::GetModelPart().GetCommunicator().SumAll(norm_v);
double norm_v1 = sqrt(norm_v);
if (norm_v1 == 0.0) norm_v1 = 1.00;
double ratio = normDx / norm_v1;
int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
if (rank == 0) std::cout << "velocity ratio = " << ratio << std::endl;
if (ratio < tol)
{
if (rank == 0) std::cout << "convergence achieved" << std::endl;
return true;
}
return false;
KRATOS_CATCH("");
}
void Compute()
{
KRATOS_TRY
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
array_1d<double,3> aux;
array_1d<double,3> aux1;
ModelPart& model_part=BaseType::GetModelPart();
const double dt = model_part.GetProcessInfo()[DELTA_TIME];
vector<unsigned int> partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
//first of all set to zero the nodal variables to be updated nodally
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
noalias(i->FastGetSolutionStepValue(FORCE)) = zero;
(i)->FastGetSolutionStepValue(NODAL_MASS)=0.0;
}
}
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 6;
vector<unsigned int> elem_partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
}
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
//first of all set to zero the nodal variables to be updated nodally
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
array_1d<double,3>& force_temp = i->FastGetSolutionStepValue(FORCE);
force_temp -=i->FastGetSolutionStepValue(NODAL_MASS) * ((i)->FastGetSolutionStepValue(VELOCITY)-(i)->FastGetSolutionStepValue(VELOCITY,1))/dt;
}
}
KRATOS_CATCH("");
}
/**
*
* @param Level
*/
virtual void SetEchoLevel(int Level) override
{
mecho_level = Level;
mpfracvel_strategy->SetEchoLevel(Level);
mppressurestep->SetEchoLevel(Level);
}
virtual void Clear() override
{
int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
if (rank == 0) KRATOS_WATCH("FracStepStrategy Clear Function called");
mpfracvel_strategy->Clear();
mppressurestep->Clear();
}
virtual double GetStageResidualNorm(unsigned int step)
{
if (step <= 3)
return mpfracvel_strategy->GetResidualNorm();
if (step == 4)
return mppressurestep->GetResidualNorm();
else
return 0.0;
}
/*@} */
/**@name Operators
*/
/*@{ */
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
typename BaseType::Pointer mpfracvel_strategy;
typename BaseType::Pointer mppressurestep;
double mvelocity_toll;
double mpressure_toll;
int mMaxVelIterations;
int mMaxPressIterations;
unsigned int mtime_order;
unsigned int mprediction_order;
bool mpredictor_corrector;
bool mReformDofAtEachIteration;
int mecho_level;
bool muse_dt_in_stabilization;
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
unsigned int m_step;
unsigned int mdomain_size;
bool proj_is_initialized;
//GenerateSlipConditionProcess::Pointer mpSlipProcess;
bool mHasSlipProcess;
std::vector< Process::Pointer > mInitializeIterationProcesses;
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++)
partitions[i] = partitions[i - 1] + partition_size;
}
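    // Example: number_of_rows = 10 with number_of_threads = 3 yields
    // partitions = {0, 3, 6, 10}; the remainder rows go to the last thread.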
/*@} */
/**@name Private Operators*/
/*@{ */
//this funcion is needed to ensure that all the memory is allocated correctly
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/** Copy constructor.
*/
FracStepStrategy(const FracStepStrategy& Other);
/*@} */
}; /* Class FracStepStrategy */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_GLS_STRATEGY defined */
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include "gather_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
typedef struct
{
int* in_shape; // the dim of the input
int axis;
int indices_num;
int dim_size;
int is_onnx;
} gather_param_t;
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
float* out_ptr = output;
float* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
// TLOG_ERR("inner_size size: %d %d \n", inner_size, param->in_shape[i]);
}
// #pragma omp parallel for num_threads(num_thread)
    if (param->is_onnx)
    {
        /* ONNX mode: this reference kernel uses indices_num directly as the
           offset along the gather axis, so each outer slice copies a single
           inner block. */
        for (int outer = 0; outer < outer_size; ++outer)
        {
            memcpy(out_ptr + (outer * param->indices_num) * inner_size,
                   in_ptr + (outer * axis_size + param->indices_num) * inner_size, inner_size * sizeof(float));
        }
} else {
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size * sizeof(float));
}
}
}
return 0;
}
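/* Worked example for the non-ONNX branch (illustrative values): with input
 * shape [2,3,4], axis = 1 and indices = {2, 0}, we get outer_size = 2,
 * axis_size = 3, inner_size = 4; for each of the 2 outer slices the kernel
 * copies rows 2 and 0 (4 floats each), producing an output of shape [2,2,4]. */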
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
uint8_t* out_ptr = output;
uint8_t* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
}
// #pragma omp parallel for num_threads(num_thread)
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size);
}
}
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem;
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
op_priv_info->is_onnx = gather_param->is_onnx;
    op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num * sizeof(int));
    if (op_priv_info->in_shape == NULL)
    {
        return -1;
    }
    return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
int out_size = input_tensor->elem_num;
// auto in_dim = input_tensor->GetShape().GetDim();
void* input = input_tensor->data;
void* indices_data = indices_tensor->data;
op_priv_info->dim_size = input_tensor->dim_num;
for (int i = 0; i < op_priv_info->dim_size; i++)
{
op_priv_info->in_shape[i] = input_tensor->dims[i];
}
// TLOG_ERR("in shape: %d %d %d %d\n", op_priv_info->in_shape[0], op_priv_info->in_shape[1], op_priv_info->in_shape[3], op_priv_info->in_shape[3]);
// int indices_num = op_param.indices_num;
void* output = output_tensor->data;
int ret = -1;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
else if(input_tensor->data_type == TENGINE_DT_UINT8)
ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread);
return ret;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t));
if (op_priv_info == NULL)
{
return -1;
}
memset(op_priv_info, 0, sizeof(gather_param_t));
exec_node->ops_priv = op_priv_info;
return 0;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;
sys_free(op_param->in_shape);
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
sys_free(op_priv_info);
exec_node->ops_priv = NULL;
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
                                          .run = run,
                                          .reshape = NULL,
                                          .postrun = postrun, /* frees in_shape allocated in prerun */
                                          .init_node = init_node,
                                          .release_node = release_node,
                                          .score = score};
int register_gather_ref_op(void* arg)
{
return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
int unregister_gather_ref_op(void* arg)
{
return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
|
GB_unop__identity_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info GB (_unop_apply__(none))
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h> /* sqrt */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define REAL float
static double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version was translated by
* Chunhua Liao, University of Houston, Jan 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
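/*
* Discretization used below: the standard 5-point stencil for
* (d2/dx2)u + (d2/dy2)u - alpha*u = f
* gives, with ax = 1/dx^2, ay = 1/dy^2 and b = -2/dx^2 - 2/dy^2 - alpha,
*
* ax*(u[i-1][j] + u[i+1][j]) + ay*(u[i][j-1] + u[i][j+1]) + b*u[i][j] = f[i][j]
*
* The solvers below compute the residual of this equation scaled by 1/b and
* relax u[i][j] by omega times that residual on each sweep.
*/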
#define DEFAULT_DIMSIZE 256
void print_array(char *title, char *name, REAL *A, int n, int m) {
printf("%s:\n", title);
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
}
printf("\n");
}
printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
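/* With the assumed exact solution u(x,y) = (1-x^2)*(1-y^2):
* u_xx = -2*(1-y^2) and u_yy = -2*(1-x^2), so the right-hand side is
* f = -alpha*(1-x^2)*(1-y^2) - 2*(1-x^2) - 2*(1-y^2),
* which is exactly the expression assigned to f[i][j] below. */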
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
int i;
int j;
int xx;
int yy;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
//double PI=3.1415926;
*dx = (2.0 / (n - 1));
*dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = ((int) (-1.0 + (*dx * (i - 1))));
yy = ((int) (-1.0 + (*dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx)))
* (1.0 - (yy * yy))) - (2.0 * (1.0 - (xx * xx))))
- (2.0 * (1.0 - (yy * yy))));
}
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
int i;
int j;
REAL xx;
REAL yy;
REAL temp;
REAL error;
error = 0.0;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = (-1.0 + (dx * (i - 1)));
yy = (-1.0 + (dy * (j - 1)));
temp = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
error = (error + (temp * temp));
}
error = (sqrt(error) / (n * m));
printf("Solution Error: %2.6g\n", error);
}
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
int main(int argc, char *argv[]) {
int n = DEFAULT_DIMSIZE;
int m = DEFAULT_DIMSIZE;
REAL alpha = 0.0543;
REAL tol = 0.0000000001;
REAL relax = 1.0;
int mits = 5000;
/*fprintf(stderr, "Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]\n");
fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
fprintf(stderr, "\talpha - Helmholtz constant (always greater than 0.0), default: %g\n", alpha);
fprintf(stderr, "\ttol - error tolerance for iterative solver, default: %g\n", tol);
fprintf(stderr, "\trelax - Successice over relaxation parameter, default: %g\n", relax);
fprintf(stderr, "\tmits - Maximum iterations for iterative solver, default: %d\n", mits);*/
if (argc == 2) {
sscanf(argv[1], "%d", &n);
m = n;
}
else if (argc == 3) {
sscanf(argv[1], "%d", &n);
sscanf(argv[2], "%d", &m);
}
else if (argc == 4) {
sscanf(argv[1], "%d", &n);
sscanf(argv[2], "%d", &m);
sscanf(argv[3], "%g", &alpha);
}
else if (argc == 5) {
sscanf(argv[1], "%d", &n);
sscanf(argv[2], "%d", &m);
sscanf(argv[3], "%g", &alpha);
sscanf(argv[4], "%g", &tol);
}
else if (argc == 6) {
sscanf(argv[1], "%d", &n);
sscanf(argv[2], "%d", &m);
sscanf(argv[3], "%g", &alpha);
sscanf(argv[4], "%g", &tol);
sscanf(argv[5], "%g", &relax);
}
else if (argc == 7) {
sscanf(argv[1], "%d", &n);
sscanf(argv[2], "%d", &m);
sscanf(argv[3], "%g", &alpha);
sscanf(argv[4], "%g", &tol);
sscanf(argv[5], "%g", &relax);
sscanf(argv[6], "%d", &mits);
}
else {
/* the rest of arg ignored */
}
printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
printf("------------------------------------------------------------------------------------------------------\n");
/** init the array */
REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
REAL dx; /* grid spacing in x direction */
REAL dy; /* grid spacing in y direction */
initialize(n, m, alpha, &dx, &dy, u, f);
memcpy(uomp, u, sizeof(REAL) * n * m);
double elapsed = read_timer_ms();
jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
elapsed = read_timer_ms() - elapsed;
printf("seq elasped time(ms): %4f\n", elapsed);
double mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
printf("MFLOPS: %12.6g\n", mflops);
puts("================");
elapsed = read_timer_ms();
jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
elapsed = read_timer_ms() - elapsed;
printf("OpenMP elasped time(ms): %4f\n", elapsed);
mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
printf("MFLOPS: %12.6g\n", mflops);
//print_array("Sequential Run", "u",(REAL*)u, n, m);
error_check(n, m, alpha, dx, dy, u, f);
free(u);
free(f);
free(uomp);
return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves the Poisson equation on a rectangular grid, assuming:
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
int i, j, k;
REAL error;
REAL ax;
REAL ay;
REAL b;
REAL resid;
REAL uold[n][m];
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
error = (10.0 * tol);
k = 1;
while ((k <= mits) && (error > tol)) {
error = 0.0;
/* Copy new solution into old */
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
for (i = 1; i < (n - 1); i++)
for (j = 1; j < (m - 1); j++) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
b * uold[i][j] - f[i][j]) / b;
//printf("i: %d, j: %d, resid: %f\n", i, j, resid);
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
/* Error check */
//if (k % 500 == 0)
// printf("Finished %d iteration with error: %g\n", k, error);
error = sqrt(error) / (n * m);
k = k + 1;
} /* End iteration loop */
printf("Total Number of Iterations: %d\n", k);
printf("Residual: %.15g\n", error);
}
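/* OpenMP variant: numerically identical to jacobi_seq, but the copy and
* stencil inner loops are vectorized with '#pragma omp simd' and uold is
* heap-allocated instead of a stack VLA so large grids do not overflow the
* stack. */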
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
int i, j, k;
REAL error;
REAL ax;
REAL ay;
REAL b;
REAL resid;
REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
REAL (*uold)[m] = (REAL (*)[m]) tmp;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
error = (10.0 * tol);
k = 1;
while ((k <= mits) && (error > tol)) {
error = 0.0;
//printf("===================== iteration %d ===========================\n", k);
/* Copy new solution into old */
for (i = 0; i < n; i++)
#pragma omp simd
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
for (i = 1; i < (n - 1); i++)
#pragma omp simd
for (j = 1; j < (m - 1); j++) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
b * uold[i][j] - f[i][j]) / b;
//printf("i: %d, j: %d, resid: %f\n", i, j, resid);
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
/* Error check */
//if (k % 500 == 0)
// printf("Finished %d iteration with error: %g\n", k, error);
error = sqrt(error) / (n * m);
k = k + 1;
} /* End iteration loop */
printf("Total Number of Iterations: %d\n", k);
printf("Residual: %.15g\n", error);
free(tmp);
}
|
array_out_bound_fix.c | //#include <omp.h>
int main(){
int i = 0;
int *p = &i;
/* The parallel for privatizes the loop variable, so *p reads the shared i,
which stays 0; the loop writes a[1..9] and reads a[2..10], so the array
must hold at least 11 elements to stay in bounds. */
int a[11];
#pragma omp parallel for
for(i=1; i<10; i++){
a[i] = a[*p + i + 1];
}
}
|
GB_unaryop__minv_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_int16
// op(A') function: GB_tran__minv_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint16_int16
(
uint16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c | #include <stdio.h>
#define N 1024
#define TEST_NESTED 1
#define TEST_CONCURRENT 1
#define TEST_CONCURRENT_TF 1
#define TEST_PARALLEL1 1
int a[N], b[N];
int main() {
int i;
int error, totError = 0;
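/* Each enabled test below launches four 'target nowait' regions that each
update one quarter of a[] from b[], waits for all of them with a taskwait,
and then checks a[i] == i+1; the variants differ only in how the arrays are
mapped (an enclosing target data region, tofrom maps on each region, or a
surrounding one-thread parallel). */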
#if TEST_NESTED
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps conpleted successfully\n");
} else {
printf(" test with nested maps conpleted with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT_TF
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent with to/from maps conpleted successfully\n");
} else {
printf(" test with concurrent with to/from maps conpleted with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent maps conpleted successfully\n");
} else {
printf(" test with concurrent maps conpleted with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_PARALLEL1
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp parallel num_threads(1)
{
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps and Parallel 1 thread conpleted successfully\n");
} else {
printf(" test with nested maps and Parallel 1 thread conpleted with %d error(s)\n", error);
totError++;
}
#endif
printf("completed with %d errors\n", totError);
return totError;
}
|
omp_parallel_for_if.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
int known_sum;
<ompts:orphan:vars>
int num_threads;
int sum, sum2;
int i;
int control;
</ompts:orphan:vars>
control = 0;
num_threads=0;
sum = 0;
sum2 = 0;
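/* control stays 0, so the if(control==1) clause under test evaluates to
false and the construct must run serially: the test passes only if the
loop computed the full sum using exactly one thread. */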
#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
<ompts:orphan>
for (i=0; i <= LOOPCOUNT; i++)
{
num_threads = omp_get_num_threads();
sum = sum + i;
} /*end of for*/
</ompts:orphan>
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
fprintf (logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
|
pr26943-1.c | /* PR c++/26943 */
/* { dg-do run } */
extern void abort (void);
extern void omp_set_dynamic (int);
int n = 6;
int
main (void)
{
int i, x = 0;
omp_set_dynamic (0);
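/* firstprivate(n) gives every thread n == 6 on entry (so x stays 0), and
lastprivate(n) copies out the value from the sequentially last iteration,
i == 15; hence the n != 15 check after the loop. */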
#pragma omp parallel for num_threads (16) firstprivate (n) lastprivate (n) \
schedule (static, 1) reduction (+: x)
for (i = 0; i < 16; i++)
{
if (n != 6)
++x;
n = i;
}
if (x || n != 15)
abort ();
return 0;
}
|
many-microtask-args.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
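// This test appears to stress how many arguments the runtime passes to the
// outlined parallel region (the "microtask"): sixteen firstprivate scalars
// plus a reduction variable must all survive the fork. rf below is the
// expected sum precomputed for these initial values.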
int main()
{
int i1 = 0;
int i2 = 1;
int i3 = 2;
int i4 = 3;
int i5 = 4;
int i6 = 6;
int i7 = 7;
int i8 = 8;
int i9 = 9;
int i10 = 10;
int i11 = 11;
int i12 = 12;
int i13 = 13;
int i14 = 14;
int i15 = 15;
int i16 = 16;
int r = 0;
#pragma omp parallel for firstprivate(i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16) reduction(+:r)
for (int i = 0; i < i16; i++) {
r += i + i1 + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + i10 + i11 + i12 + i13 + i14 + i15 + i16;
}
int rf = 2216;
if (r != rf) {
fprintf(stderr, "r should be %d but instead equals %d\n", rf, r);
return 1;
}
return 0;
}
|
tti_prot_so4.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
double section1;
double section2;
} ;
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r48_vec, float *restrict r49_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r48_vec, float *restrict r49_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
int ForwardTTI(struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict phi_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int x_size, const int y_M, const int y_m, const int y_size, const int z_M, const int z_m, const int z_size, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers, const int x0_blk0_size, const int x1_blk0_size, const int y0_blk0_size, const int y1_blk0_size, const int nthreads, const int nthreads_nonaffine)
{
float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]]) delta_vec->data;
float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data;
float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]]) theta_vec->data;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
float (*r21)[y_size + 1][z_size + 1];
posix_memalign((void**)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r20)[y_size + 1][z_size + 1];
posix_memalign((void**)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r19)[y_size + 1][z_size + 1];
posix_memalign((void**)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r18)[y_size + 1][z_size + 1];
posix_memalign((void**)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r17)[y_size + 1][z_size + 1];
posix_memalign((void**)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r48)[y_size + 1][z_size + 1];
posix_memalign((void**)&r48, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r49)[y_size + 1][z_size + 1];
posix_memalign((void**)&r49, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static,1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta,phi,theta:32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2*delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
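/* Time loop: t0/t1/t2 index a circular buffer of three time levels of u and
v (current, next, previous), rotated with modulo-3 arithmetic so the full
time history never has to be stored. */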
for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
{
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
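/* The four bf0 calls (and the four bf1 calls below) decompose the x/y
iteration space into the full-block region plus the y-remainder, x-remainder,
and corner strips left over when the extents are not multiples of the block
sizes. */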
bf0((float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,x_size,y_size,z_size,t0,x0_blk0_size,x_M - (x_M - x_m + 2)%(x0_blk0_size),x_m - 1,y0_blk0_size,y_M - (y_M - y_m + 2)%(y0_blk0_size),y_m - 1,z_M,z_m,nthreads);
bf0((float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,x_size,y_size,z_size,t0,x0_blk0_size,x_M - (x_M - x_m + 2)%(x0_blk0_size),x_m - 1,(y_M - y_m + 2)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 2)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
bf0((float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,x_size,y_size,z_size,t0,(x_M - x_m + 2)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 2)%(x0_blk0_size) + 1,y0_blk0_size,y_M - (y_M - y_m + 2)%(y0_blk0_size),y_m - 1,z_M,z_m,nthreads);
bf0((float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,x_size,y_size,z_size,t0,(x_M - x_m + 2)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 2)%(x0_blk0_size) + 1,(y_M - y_m + 2)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 2)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
bf1(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,vp_vec,x_size,y_size,z_size,t0,t1,t2,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads);
bf1(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,vp_vec,x_size,y_size,z_size,t0,t1,t2,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads);
bf1(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,vp_vec,x_size,y_size,z_size,t0,t1,t2,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads);
bf1(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,(float *)r48,(float *)r49,u_vec,v_vec,vp_vec,x_size,y_size,z_size,t0,t1,t2,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads);
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
/* Begin section2 */
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
float posx = -o_x + src_coords[p_src][0];
float posy = -o_y + src_coords[p_src][1];
float posz = -o_z + src_coords[p_src][2];
int ii_src_0 = (int)(floor(1.0e-1*posx));
int ii_src_1 = (int)(floor(1.0e-1*posy));
int ii_src_2 = (int)(floor(1.0e-1*posz));
int ii_src_3 = (int)(floor(1.0e-1*posz)) + 1;
int ii_src_4 = (int)(floor(1.0e-1*posy)) + 1;
int ii_src_5 = (int)(floor(1.0e-1*posx)) + 1;
float px = (float)(posx - 1.0e+1F*(int)(floor(1.0e-1F*posx)));
float py = (float)(posy - 1.0e+1F*(int)(floor(1.0e-1F*posy)));
float pz = (float)(posz - 1.0e+1F*(int)(floor(1.0e-1F*posz)));
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r22 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r22;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r23 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r23;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r24 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r24;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r25 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r25;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r26 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r26;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r27 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r27;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r28 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r28;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r29 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4])*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r29;
}
posx = -o_x + src_coords[p_src][0];
posy = -o_y + src_coords[p_src][1];
posz = -o_z + src_coords[p_src][2];
ii_src_0 = (int)(floor(1.0e-1*posx));
ii_src_1 = (int)(floor(1.0e-1*posy));
ii_src_2 = (int)(floor(1.0e-1*posz));
ii_src_3 = (int)(floor(1.0e-1*posz)) + 1;
ii_src_4 = (int)(floor(1.0e-1*posy)) + 1;
ii_src_5 = (int)(floor(1.0e-1*posx)) + 1;
px = (float)(posx - 1.0e+1F*(int)(floor(1.0e-1F*posx)));
py = (float)(posy - 1.0e+1F*(int)(floor(1.0e-1F*posy)));
pz = (float)(posz - 1.0e+1F*(int)(floor(1.0e-1F*posz)));
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r30 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r30;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r31 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r31;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r32 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r32;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r33 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r33;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r34 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r34;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r35 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r35;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r36 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r36;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r37 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4])*src[time][p_src];
#pragma omp atomic update
v[t1][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r37;
}
}
}
/* End section2 */
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
}
free(r21);
free(r20);
free(r19);
free(r18);
free(r17);
free(r48);
free(r49);
return 0;
}
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r48_vec, float *restrict r49_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
float (*restrict r18)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r18_vec;
float (*restrict r19)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r19_vec;
float (*restrict r20)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r20_vec;
float (*restrict r21)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r21_vec;
float (*restrict r48)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r48_vec;
float (*restrict r49)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r49_vec;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic,1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
{
for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(u,v:32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r53 = -v[t0][x + 4][y + 4][z + 4];
r49[x + 1][y + 1][z + 1] = 1.0e-1F*(-(r53 + v[t0][x + 4][y + 4][z + 5])*r18[x + 1][y + 1][z + 1] - (r53 + v[t0][x + 4][y + 5][z + 4])*r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1] - (r53 + v[t0][x + 5][y + 4][z + 4])*r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]);
float r54 = -u[t0][x + 4][y + 4][z + 4];
r48[x + 1][y + 1][z + 1] = 1.0e-1F*(-(r54 + u[t0][x + 4][y + 4][z + 5])*r18[x + 1][y + 1][z + 1] - (r54 + u[t0][x + 4][y + 5][z + 4])*r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1] - (r54 + u[t0][x + 5][y + 4][z + 4])*r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]);
}
}
}
}
}
}
}
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r48_vec, float *restrict r49_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data;
float (*restrict r17)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r17_vec;
float (*restrict r18)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r18_vec;
float (*restrict r19)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r19_vec;
float (*restrict r20)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r20_vec;
float (*restrict r21)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r21_vec;
float (*restrict r48)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r48_vec;
float (*restrict r49)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r49_vec;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
if (x1_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic,1)
for (int x1_blk0 = x_m; x1_blk0 <= x_M; x1_blk0 += x1_blk0_size)
{
for (int y1_blk0 = y_m; y1_blk0 <= y_M; y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= x1_blk0 + x1_blk0_size - 1; x += 1)
{
for (int y = y1_blk0; y <= y1_blk0 + y1_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(damp,epsilon,u,v,vp:32)
for (int z = z_m; z <= z_M; z += 1)
{
float r60 = 1.0/dt;
float r59 = 1.0/(dt*dt);
float r58 = r18[x + 1][y + 1][z]*r49[x + 1][y + 1][z] - r18[x + 1][y + 1][z + 1]*r49[x + 1][y + 1][z + 1] + r19[x + 1][y][z + 1]*r20[x + 1][y][z + 1]*r49[x + 1][y][z + 1] - r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1]*r49[x + 1][y + 1][z + 1] + r20[x][y + 1][z + 1]*r21[x][y + 1][z + 1]*r49[x][y + 1][z + 1] - r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]*r49[x + 1][y + 1][z + 1];
float r57 = pow(vp[x + 4][y + 4][z + 4], -2);
float r56 = 1.0e-1F*(-r18[x + 1][y + 1][z]*r48[x + 1][y + 1][z] + r18[x + 1][y + 1][z + 1]*r48[x + 1][y + 1][z + 1] - r19[x + 1][y][z + 1]*r20[x + 1][y][z + 1]*r48[x + 1][y][z + 1] + r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1]*r48[x + 1][y + 1][z + 1] - r20[x][y + 1][z + 1]*r21[x][y + 1][z + 1]*r48[x][y + 1][z + 1] + r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]*r48[x + 1][y + 1][z + 1]) - 8.33333315e-4F*(u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 1.3333333e-2F*(u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 7.49999983e-2F*u[t0][x + 4][y + 4][z + 4];
float r55 = 1.0/(r57*r59 + r60*damp[x + 1][y + 1][z + 1]);
float r46 = r59*(-2.0F*u[t0][x + 4][y + 4][z + 4] + u[t2][x + 4][y + 4][z + 4]);
float r47 = r59*(-2.0F*v[t0][x + 4][y + 4][z + 4] + v[t2][x + 4][y + 4][z + 4]);
u[t1][x + 4][y + 4][z + 4] = r55*((-r46)*r57 + r56*(2*epsilon[x + 4][y + 4][z + 4] + 1) + 1.0e-1F*r58*r17[x + 1][y + 1][z + 1] + r60*(damp[x + 1][y + 1][z + 1]*u[t0][x + 4][y + 4][z + 4]));
v[t1][x + 4][y + 4][z + 4] = r55*((-r47)*r57 + r56*r17[x + 1][y + 1][z + 1] + 1.0e-1F*r58 + r60*(damp[x + 1][y + 1][z + 1]*v[t0][x + 4][y + 4][z + 4]));
}
}
}
}
}
}
}
|
5829.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp parallel for private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
spmm.h | // ------------------------------------------------------------------------
// File: spmm.h
// S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs.
// This file implements the Sparse-Matrix-Dense-Matrix multiplication (SPMM).
// ------------------------------------------------------------------------
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz
// and Linghao Song
// GitHub repo: http://www.github.com/uuudown/S-BLAS
// PNNL-IPID: 31803-E, IR: PNNL-31803
// MIT License.
// ------------------------------------------------------------------------
#ifndef SPMM_H
#define SPMM_H
#include <iostream>
#include <assert.h>
#include <omp.h>
#include <cusparse.h>
#include <nccl.h>
#include "utility.h"
#include "matrix.h"
using namespace std;
//========================== CPU baseline version ============================
template <typename IdxType, typename DataType>
void sblas_spmm_csr_cpu(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width));
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_CPU: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
for (IdxType i=0; i<pA->height; i++)
{
for(IdxType n=0; n<pB->width; n++)
{
DataType sum = 0;
for (IdxType j=pA->csrRowPtr[i]; j<pA->csrRowPtr[i+1]; j++)
{
IdxType col_A = pA->csrColIdx[j];
DataType val_A = pA->csrVal[j];
DataType val_B = pB->val[n*(pB->height)+col_A];
sum += val_A * val_B;
}
pC->val[i*(pC->width)+n] = beta*(pC->val[i*(pC->width)+n]) + alpha*sum; /* read and write C with the same row-major indexing */
}
}
}
else
{
for (IdxType i=0; i<pA->height; i++)
{
for(IdxType n=0; n<pB->width; n++)
{
DataType sum = 0;
for (IdxType j=pA->csrRowPtr[i]; j<pA->csrRowPtr[i+1]; j++)
{
IdxType col_A = pA->csrColIdx[j];
DataType val_A = pA->csrVal[j];
DataType val_B = pB->val[n*(pB->height)+col_A];
sum += val_A * val_B;
}
pC->val[n*(pC->height)+i] = beta*(pC->val[n*(pC->height)+i]) + alpha*sum;
}
}
}
}
/** Compute Sparse-Matrix-Dense-Matrix Multiplication using multi-GPUs.
* Since A and B are allocated on unified memory, there is no need for memcpy.
* The idea is to reuse A on each GPU and partition B; each GPU then calls
* cuSparse single-GPU spMM to compute its own share. For this method, there is
* no explicit inter-GPU communication required.
* --------- C = A * B -----------
* A[m*k] in CSR sparse format
* B[k*n] in column major dense format
* C[m*n] in column major dense format
*/
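/* A minimal sketch of the column split this relies on (hypothetical names;
* the real split is encapsulated in DenseMatrix, e.g. get_dim_gpu_num()):
*
* IdxType base = pB->width / n_gpu; // columns every GPU gets
* IdxType rem = pB->width % n_gpu; // first `rem` GPUs get one more
* IdxType my_cols = base + (i_gpu < rem ? 1 : 0);
* IdxType my_first = i_gpu * base + (i_gpu < rem ? i_gpu : rem);
*
* Each GPU then multiplies the full A against its own column panel of B and
* writes a disjoint column panel of C, so no inter-GPU traffic is needed. */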
template <typename IdxType, typename DataType>
void sblas_spmm_csr_v1(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta,
unsigned n_gpu)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width) );
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V1: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V1: C should be in column major!" << endl;
exit(-1);
}
//Start OpenMP
#pragma omp parallel num_threads (n_gpu)
{
int i_gpu = omp_get_thread_num();
CUDA_SAFE_CALL( cudaSetDevice(i_gpu) );
cusparseHandle_t handle;
cusparseMatDescr_t mat_A;
cusparseStatus_t cusparse_status;
CHECK_CUSPARSE( cusparseCreate(&handle) );
CHECK_CUSPARSE( cusparseCreateMatDescr(&mat_A) );
CHECK_CUSPARSE( cusparseSetMatType(mat_A, CUSPARSE_MATRIX_TYPE_GENERAL) );
CHECK_CUSPARSE( cusparseSetMatIndexBase(mat_A, CUSPARSE_INDEX_BASE_ZERO) );
printf("gpu-%d m:%d,n:%ld,k:%d\n",i_gpu, pA->height, pB->get_dim_gpu_num(i_gpu), pA->width);
CHECK_CUSPARSE( cusparseDcsrmm(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
pA->height,
pB->get_dim_gpu_num(i_gpu),
pA->width,
pA->nnz,
&alpha,
mat_A,
pA->csrVal_gpu[i_gpu],
pA->csrRowPtr_gpu[i_gpu],
pA->csrColIdx_gpu[i_gpu],
pB->val_gpu[i_gpu],
pA->width,
&beta,
pC->val_gpu[i_gpu],
pC->height) );
pC->sync2cpu(i_gpu);
#pragma omp barrier
CHECK_CUSPARSE( cusparseDestroyMatDescr(mat_A) );
CHECK_CUSPARSE( cusparseDestroy(handle) );
} //end of omp
}
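/* Variant 2: instead of splitting B, split A by rows across the GPUs. Each
* GPU computes its partial product into a replicated copy C_copy (offset by
* starting_row_gpu), an ncclAllReduce then sums the partial results across
* devices, and plusDenseMatrixGPU finally merges C_copy into C with the
* alpha/beta scaling. */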
template <typename IdxType, typename DataType>
void sblas_spmm_csr_v2(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta,
unsigned n_gpu)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width) );
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V2: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V2: C should be in col major!" << endl;
exit(-1);
}
ncclUniqueId id;
ncclGetUniqueId(&id);
ncclComm_t comm[n_gpu];
DenseMatrix<IdxType,DataType> C_copy(pC->height, pC->width, 0., row_major);
C_copy.sync2gpu(n_gpu, replicate);
//Start OpenMP
#pragma omp parallel num_threads (n_gpu) shared (comm, id)
{
int i_gpu = omp_get_thread_num();
CUDA_SAFE_CALL( cudaSetDevice(i_gpu) );
CHECK_NCCL(ncclCommInitRank(&comm[i_gpu], n_gpu, id, i_gpu));
cusparseHandle_t handle;
cusparseMatDescr_t mat_A;
cusparseStatus_t cusparse_status;
CHECK_CUSPARSE( cusparseCreate(&handle) );
CHECK_CUSPARSE( cusparseCreateMatDescr(&mat_A) );
CHECK_CUSPARSE( cusparseSetMatType(mat_A, CUSPARSE_MATRIX_TYPE_GENERAL) );
CHECK_CUSPARSE( cusparseSetMatIndexBase(mat_A, CUSPARSE_INDEX_BASE_ZERO) );
DataType dummy_alpha = 1.0;
DataType dummy_beta = 1.0;
CHECK_CUSPARSE( cusparseDcsrmm(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
pA->get_gpu_row_ptr_num(i_gpu)-1,
pB->width,
pA->width,
pA->nnz_gpu[i_gpu],
&dummy_alpha,
mat_A,
pA->csrVal_gpu[i_gpu],
pA->csrRowPtr_gpu[i_gpu],
pA->csrColIdx_gpu[i_gpu],
pB->val_gpu[i_gpu],
pB->height,
&dummy_beta,
/*C_copy.val_gpu[i_gpu],*/
&(C_copy.val_gpu[i_gpu])[(pA->starting_row_gpu[i_gpu])],
/*&(C_copy.val_gpu[i_gpu])[(pA->starting_row_gpu[i_gpu])*(pB->width)],*/
/*C_copy.val_gpu[i_gpu] + 1,*/
//pC->width) );
pC->height) );
#pragma omp barrier
CUDA_SAFE_CALL(cudaThreadSynchronize());
#pragma omp barrier
gpu_timer nccl_timer;
nccl_timer.start_timer();
CHECK_NCCL( ncclAllReduce(C_copy.val_gpu[i_gpu], C_copy.val_gpu[i_gpu],
C_copy.get_mtx_num(), ncclDouble, ncclSum, comm[i_gpu], 0) );
CUDA_SAFE_CALL(cudaThreadSynchronize());
#pragma omp barrier
nccl_timer.stop_timer();
cout << "GPU-" << i_gpu << " NCCL Time: " << nccl_timer.measure() << "ms." << endl;
CHECK_CUSPARSE( cusparseDestroyMatDescr(mat_A) );
CHECK_CUSPARSE( cusparseDestroy(handle) );
CHECK_NCCL(ncclCommDestroy(comm[i_gpu]));
}
CUDA_CHECK_ERROR();
pC->plusDenseMatrixGPU(C_copy, alpha, beta);
}
#endif
|
GB_unaryop__lnot_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int64
// op(A') function: GB_tran__lnot_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp32_int64
(
float *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
prot_a3ada.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
double section1;
double section2;
} ;
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, float * *restrict r61_vec, float * *restrict r62_vec);
int ForwardTTI(struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict phi_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x0_blk0_size, const int x_M, const int x_m, const int x_size, const int y0_blk0_size, const int y_M, const int y_m, const int y_size, const int z_M, const int z_m, const int z_size, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers, const int nthreads, const int nthreads_nonaffine)
{
float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]]) delta_vec->data;
float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data;
float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]]) theta_vec->data;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
float (*r17)[y_size + 1][z_size + 1];
posix_memalign((void**)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r18)[y_size + 1][z_size + 1];
posix_memalign((void**)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r19)[y_size + 1][z_size + 1];
posix_memalign((void**)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r20)[y_size + 1][z_size + 1];
posix_memalign((void**)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float (*r21)[y_size + 1][z_size + 1];
posix_memalign((void**)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float **r61;
posix_memalign((void**)&r61, 64, sizeof(float*)*nthreads);
float **r62;
posix_memalign((void**)&r62, 64, sizeof(float*)*nthreads);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void**)&r61[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void**)&r62[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static,1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta,phi,theta:32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r17[x + 1][y + 1][z + 1] = sqrt(2*delta[x + 4][y + 4][z + 4] + 1);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
for (int time = time_m, t1 = (time + 2)%(3), t0 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t1 = (time + 2)%(3), t0 = (time)%(3), t2 = (time + 1)%(3))
{
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
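  /* The four bf0 calls below tile the (x, y) plane with remainder handling:
     full x0_blk0_size-by-y0_blk0_size blocks first, then the y remainder
     strip, the x remainder strip, and finally the corner block; see the
     coverage sketch after bf0 below. */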
bf0(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,u_vec,v_vec,vp_vec,x0_blk0_size,x_size,y0_blk0_size,y_size,z_size,t0,t1,t2,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads,(float * *)r61,(float * *)r62);
bf0(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,u_vec,v_vec,vp_vec,x0_blk0_size,x_size,(y_M - y_m + 1)%(y0_blk0_size),y_size,z_size,t0,t1,t2,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads,(float * *)r61,(float * *)r62);
bf0(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,u_vec,v_vec,vp_vec,(x_M - x_m + 1)%(x0_blk0_size),x_size,y0_blk0_size,y_size,z_size,t0,t1,t2,x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads,(float * *)r61,(float * *)r62);
bf0(damp_vec,dt,epsilon_vec,(float *)r17,(float *)r18,(float *)r19,(float *)r20,(float *)r21,u_vec,v_vec,vp_vec,(x_M - x_m + 1)%(x0_blk0_size),x_size,(y_M - y_m + 1)%(y0_blk0_size),y_size,z_size,t0,t1,t2,x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads,(float * *)r61,(float * *)r62);
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
/* Begin section2 */
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
float posx = -o_x + src_coords[p_src][0];
float posy = -o_y + src_coords[p_src][1];
float posz = -o_z + src_coords[p_src][2];
int ii_src_0 = (int)(floor(1.0e-1*posx));
int ii_src_1 = (int)(floor(1.0e-1*posy));
int ii_src_2 = (int)(floor(1.0e-1*posz));
int ii_src_3 = (int)(floor(1.0e-1*posz)) + 1;
int ii_src_4 = (int)(floor(1.0e-1*posy)) + 1;
int ii_src_5 = (int)(floor(1.0e-1*posx)) + 1;
float px = (float)(posx - 1.0e+1F*(int)(floor(1.0e-1F*posx)));
float py = (float)(posy - 1.0e+1F*(int)(floor(1.0e-1F*posy)));
float pz = (float)(posz - 1.0e+1F*(int)(floor(1.0e-1F*posz)));
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r22 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r22;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r23 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r23;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r24 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r24;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r25 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r25;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r26 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r26;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r27 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r27;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r28 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r28;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r29 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4])*src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r29;
}
posx = -o_x + src_coords[p_src][0];
posy = -o_y + src_coords[p_src][1];
posz = -o_z + src_coords[p_src][2];
ii_src_0 = (int)(floor(1.0e-1*posx));
ii_src_1 = (int)(floor(1.0e-1*posy));
ii_src_2 = (int)(floor(1.0e-1*posz));
ii_src_3 = (int)(floor(1.0e-1*posz)) + 1;
ii_src_4 = (int)(floor(1.0e-1*posy)) + 1;
ii_src_5 = (int)(floor(1.0e-1*posx)) + 1;
px = (float)(posx - 1.0e+1F*(int)(floor(1.0e-1F*posx)));
py = (float)(posy - 1.0e+1F*(int)(floor(1.0e-1F*posy)));
pz = (float)(posz - 1.0e+1F*(int)(floor(1.0e-1F*posz)));
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r30 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r30;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r31 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r31;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r32 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r32;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r33 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r33;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r34 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r34;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r35 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r35;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r36 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r36;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r37 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4])*src[time][p_src];
#pragma omp atomic update
v[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r37;
}
}
}
/* End section2 */
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r61[tid]);
free(r62[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r61);
free(r62);
return 0;
}
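/* A minimal sketch (hypothetical helper, not part of the generated code) of
   the three-buffer time rotation used by the loop in ForwardTTI: t0 = time%3
   is the current level, t2 = (time+1)%3 the level being written, and
   t1 = (time+2)%3 == (time-1)%3 the previous level, so only three time
   levels are ever stored regardless of time_M. */
static void time_rotation_sketch(float *buf[3], const int time_m, const int time_M)
{
  for (int time = time_m, t1 = (time + 2)%(3), t0 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t1 = (time + 2)%(3), t0 = (time)%(3), t2 = (time + 1)%(3))
  {
    /* a leapfrog-style update reads levels t0 and t1 and writes level t2 */
    buf[t2][0] = 2.0F*buf[t0][0] - buf[t1][0];
  }
}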
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, float * *restrict r61_vec, float * *restrict r62_vec)
{
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data;
float (*restrict r17)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r17_vec;
float (*restrict r18)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r18_vec;
float (*restrict r19)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r19_vec;
float (*restrict r20)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r20_vec;
float (*restrict r21)[y_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y_size + 1][z_size + 1]) r21_vec;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
float **r61 = (float**) r61_vec;
float **r62 = (float**) r62_vec;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
float (*restrict r48)[y0_blk0_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y0_blk0_size + 1][z_size + 1]) r61[tid];
float (*restrict r49)[y0_blk0_size + 1][z_size + 1] __attribute__ ((aligned (64))) = (float (*)[y0_blk0_size + 1][z_size + 1]) r62[tid];
#pragma omp for collapse(1) schedule(dynamic,1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0 - 1, xs = 0; x <= x0_blk0 + x0_blk0_size - 1; x += 1, xs += 1)
{
for (int y = y0_blk0 - 1, ys = 0; y <= y0_blk0 + y0_blk0_size - 1; y += 1, ys += 1)
{
#pragma omp simd aligned(u,v:32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r53 = -u[t0][x + 4][y + 4][z + 4];
r48[xs][ys][z + 1] = 1.0e-1F*(-(r53 + u[t0][x + 4][y + 4][z + 5])*r18[x + 1][y + 1][z + 1] - (r53 + u[t0][x + 4][y + 5][z + 4])*r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1] - (r53 + u[t0][x + 5][y + 4][z + 4])*r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]);
float r54 = -v[t0][x + 4][y + 4][z + 4];
r49[xs][ys][z + 1] = 1.0e-1F*(-(r54 + v[t0][x + 4][y + 4][z + 5])*r18[x + 1][y + 1][z + 1] - (r54 + v[t0][x + 4][y + 5][z + 4])*r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1] - (r54 + v[t0][x + 5][y + 4][z + 4])*r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]);
}
}
}
for (int x = x0_blk0, xs = 0; x <= x0_blk0 + x0_blk0_size - 1; x += 1, xs += 1)
{
for (int y = y0_blk0, ys = 0; y <= y0_blk0 + y0_blk0_size - 1; y += 1, ys += 1)
{
#pragma omp simd aligned(damp,epsilon,u,v,vp:32)
for (int z = z_m; z <= z_M; z += 1)
{
float r60 = 1.0/dt;
float r59 = 1.0/(dt*dt);
float r58 = r18[x + 1][y + 1][z]*r49[xs + 1][ys + 1][z] - r18[x + 1][y + 1][z + 1]*r49[xs + 1][ys + 1][z + 1] + r19[x + 1][y][z + 1]*r20[x + 1][y][z + 1]*r49[xs + 1][ys][z + 1] - r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1]*r49[xs + 1][ys + 1][z + 1] + r20[x][y + 1][z + 1]*r21[x][y + 1][z + 1]*r49[xs][ys + 1][z + 1] - r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]*r49[xs + 1][ys + 1][z + 1];
float r57 = 1.0/(vp[x + 4][y + 4][z + 4]*vp[x + 4][y + 4][z + 4]);
float r56 = 1.0e-1F*(-r18[x + 1][y + 1][z]*r48[xs + 1][ys + 1][z] + r18[x + 1][y + 1][z + 1]*r48[xs + 1][ys + 1][z + 1] - r19[x + 1][y][z + 1]*r20[x + 1][y][z + 1]*r48[xs + 1][ys][z + 1] + r19[x + 1][y + 1][z + 1]*r20[x + 1][y + 1][z + 1]*r48[xs + 1][ys + 1][z + 1] - r20[x][y + 1][z + 1]*r21[x][y + 1][z + 1]*r48[xs][ys + 1][z + 1] + r20[x + 1][y + 1][z + 1]*r21[x + 1][y + 1][z + 1]*r48[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F*(u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 1.3333333e-2F*(u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 7.49999983e-2F*u[t0][x + 4][y + 4][z + 4];
float r55 = 1.0/(r57*r59 + r60*damp[x + 1][y + 1][z + 1]);
float r46 = r59*(-2.0F*u[t0][x + 4][y + 4][z + 4] + u[t1][x + 4][y + 4][z + 4]);
float r47 = r59*(-2.0F*v[t0][x + 4][y + 4][z + 4] + v[t1][x + 4][y + 4][z + 4]);
u[t2][x + 4][y + 4][z + 4] = r55*((-r46)*r57 + r56*(2*epsilon[x + 4][y + 4][z + 4] + 1) + 1.0e-1F*r58*r17[x + 1][y + 1][z + 1] + r60*(damp[x + 1][y + 1][z + 1]*u[t0][x + 4][y + 4][z + 4]));
v[t2][x + 4][y + 4][z + 4] = r55*((-r47)*r57 + r56*r17[x + 1][y + 1][z + 1] + 1.0e-1F*r58 + r60*(damp[x + 1][y + 1][z + 1]*v[t0][x + 4][y + 4][z + 4]));
}
}
}
}
}
}
}
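/* A sketch of the remainder decomposition behind the four bf0 calls in
   ForwardTTI (hypothetical helper): with x_rem/y_rem the iteration counts
   modulo the block sizes, the four sub-ranges cover the plane exactly once;
   bf0's early return on x0_blk0_size == 0 covers a zero-sized x remainder. */
static int tiled_point_count_sketch(int x_m, int x_M, int y_m, int y_M, int bx, int by)
{
  const int x_rem = (x_M - x_m + 1)%(bx);
  const int y_rem = (y_M - y_m + 1)%(by);
  int n = 0;
  n += (x_M - x_rem - x_m + 1)*(y_M - y_rem - y_m + 1); /* full blocks */
  n += (x_M - x_rem - x_m + 1)*y_rem;                   /* y remainder strip */
  n += x_rem*(y_M - y_rem - y_m + 1);                   /* x remainder strip */
  n += x_rem*y_rem;                                     /* corner block */
  return n; /* equals (x_M - x_m + 1)*(y_M - y_m + 1): exact coverage */
}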
/* Backdoor edit at Tue Sep 22 18:44:51 2020*/
|
residual.ompsimd.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#if (BLOCKCOPY_TILE_I != 10000)
#error residual.ompsimd.c does not permit blocking in the unit stride (BLOCKCOPY_TILE_I != 10000)
#endif
//------------------------------------------------------------------------------------------------------------------------------
void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){
int block;
// exchange the boundary for x in prep for Ax...
exchange_boundary(level,x_id,stencil_get_shape());
apply_BCs(level,x_id,stencil_get_shape());
// now do residual/restriction proper...
double _timeStart = getTime();
double h2inv = 1.0/(level->h*level->h);
// loop over all block/tiles this process owns...
PRAGMA_THREAD_ACROSS_BLOCKS(level,block,level->num_my_blocks)
for(block=0;block<level->num_my_blocks;block++){
const int box = level->my_blocks[block].read.box;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int idim = level->my_blocks[block].dim.i;
const int jdim = level->my_blocks[block].dim.j;
const int kdim = level->my_blocks[block].dim.k;
const int ghosts = level->box_ghosts;
const int jStride = level->box_jStride;
const int kStride = level->box_kStride;
//const int jStride = level->my_boxes[box].jStride;
//const int kStride = level->my_boxes[box].kStride;
const int offset = ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); // offset from first ghost zone to first element this block operates on
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + offset;
#ifdef VECTOR_ALPHA
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + offset;
#endif
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + offset;
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + offset;
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + offset;
const double * __restrict__ x = level->my_boxes[box].vectors[ x_id] + offset;
double * __restrict__ res = level->my_boxes[box].vectors[ res_id] + offset;
#ifdef __INTEL_COMPILER
//__assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_i,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_j,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_k,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(x ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(res ,BOX_ALIGN_JSTRIDE*sizeof(double));
__assume( ( jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned
__assume( ( kStride) % BOX_ALIGN_JSTRIDE == 0);
__assume( jStride >= BOX_ALIGN_JSTRIDE);
__assume( kStride >= BOX_ALIGN_JSTRIDE);
__assume( idim > 0);
__assume( jdim > 0);
__assume( kdim > 0);
#elif __xlC__
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i);
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j);
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k);
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), res );
#endif
int i,j,k,ij;
#if 0
for(k=0;k<kdim;k++){
for(j=0;j<jdim;j++){
#if (_OPENMP>=201307) // OpenMP 4.0
#pragma omp simd aligned(alpha,beta_i,beta_j,beta_k,rhs,x,res:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=128
#pragma vector nontemporal // generally, we don't expect to reuse res
for(i=0;i<idim;i++){
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(x);
res[ijk] = rhs[ijk]-Ax;
}}}
#else // fused IJ loop
for(k=0;k<kdim;k++){
#if (_OPENMP>=201307) // OpenMP 4.0
#pragma omp simd aligned(alpha,beta_i,beta_j,beta_k,rhs,x,res:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#pragma vector nontemporal // generally, we don't expect to reuse res
for(ij=0;ij<jdim*jStride;ij++){ // This walks into the ghost zone but should be completely SIMDizable
int ijk = ij + k*kStride;
double Ax = apply_op_ijk(x);
res[ijk] = rhs[ijk]-Ax;
}}
#endif
} // boxes
level->timers.residual += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
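// A plausible form of the apply_op_ijk macro used above, shown as a
// standalone helper with explicit arguments (sketch only: the real macro is
// supplied by the operator file selected at build time, and the alpha term
// is present only when VECTOR_ALPHA is defined). This is the classic 7-point
// variable-coefficient operator a*alpha*x - b*div(beta*grad x):
static inline double apply_op_ijk_sketch(
  const double * __restrict__ x,      const double * __restrict__ alpha,
  const double * __restrict__ beta_i, const double * __restrict__ beta_j,
  const double * __restrict__ beta_k,
  int ijk, int jStride, int kStride, double a, double b, double h2inv){
  return a*alpha[ijk]*x[ijk] - b*h2inv*(
      beta_i[ijk+1      ]*( x[ijk+1      ] - x[ijk] )
    - beta_i[ijk        ]*( x[ijk] - x[ijk-1      ] )
    + beta_j[ijk+jStride]*( x[ijk+jStride] - x[ijk] )
    - beta_j[ijk        ]*( x[ijk] - x[ijk-jStride] )
    + beta_k[ijk+kStride]*( x[ijk+kStride] - x[ijk] )
    - beta_k[ijk        ]*( x[ijk] - x[ijk-kStride] )
  );
}
//------------------------------------------------------------------------------------------------------------------------------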
|
dependences_mutexinoutset.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// GCC 9 introduced codegen for mutexinoutset
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// clang 9 introduced codegen for mutexinoutset
// UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
int main() {
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
print_ids(0);
printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
&x);
#pragma omp task depend(out : x)
{
x++;
delay(100);
}
print_fuzzy_address(1);
print_ids(0);
#pragma omp task depend(mutexinoutset : x)
{
x++;
delay(100);
}
print_fuzzy_address(2);
print_ids(0);
#pragma omp task depend(in : x)
{ x = -1; }
print_ids(0);
}
}
x++;
return 0;
}
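// A minimal sketch of the dependence semantics this test exercises, kept out
// of the compiled test on purpose (#if 0): the "out" task runs first, the
// "mutexinoutset" tasks may run in either order but never concurrently with
// each other, and the trailing "in" task waits for all of them.
#if 0
void mutexinoutset_sketch(void) {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x)
    x = 1; // first: sole writer
#pragma omp task depend(mutexinoutset : x)
    x += 10; // second or third, mutually exclusive with the next task
#pragma omp task depend(mutexinoutset : x)
    x += 100; // second or third, mutually exclusive with the previous task
#pragma omp task depend(in : x)
    printf("x = %d\n", x); // last: always prints 111
  }
}
#endif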
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
|
phys_sfc_properties.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
With this program, orographies can be produced.
*/
#include <stdlib.h>
#include <stdio.h>
#include <netcdf.h>
#include <math.h>
#include <string.h>
#include <geos95.h>
#include "../../src/game_types.h"
#include "../../src/game_constants.h"
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
#define P_0 100000.0
const double MOUNTAIN_HEIGHT = 10e3;
const double MOUNTAIN_FWHM = 1000e3;
int set_sfc_properties(double latitude_scalar[], double longitude_scalar[], double roughness_length[], double sfc_albedo[],
double sfc_rho_c[], double t_conductivity[], double oro[], int is_land[], int oro_id)
{
double *oro_unfiltered = malloc(NO_OF_SCALARS_H*sizeof(double));
int ncid, retval, is_land_id;
if (oro_id == 2)
{
char is_land_file_pre[200];
sprintf(is_land_file_pre, "phys_quantities/B%d_is_land.nc", RES_ID);
char is_land_file[strlen(is_land_file_pre) + 1];
strcpy(is_land_file, is_land_file_pre);
if ((retval = nc_open(is_land_file, NC_NOWRITE, &ncid)))
ERR(retval);
if ((retval = nc_inq_varid(ncid, "is_land", &is_land_id)))
ERR(retval);
if ((retval = nc_get_var_int(ncid, is_land_id, &is_land[0])))
ERR(retval);
if ((retval = nc_close(ncid)))
ERR(retval);
}
// reading the ETOPO orography
int lat_in_id, lon_in_id, z_in_id;
int no_of_lat_points = 10801;
int no_of_lon_points = 21601;
double *latitude_input = malloc(no_of_lat_points*sizeof(double));
double *longitude_input = malloc(no_of_lon_points*sizeof(double));
int (*z_input)[no_of_lon_points] = malloc(sizeof(int[no_of_lat_points][no_of_lon_points]));
if (oro_id == 2)
{
if ((retval = nc_open("phys_quantities/etopo.nc", NC_NOWRITE, &ncid)))
ERR(retval);
if ((retval = nc_inq_varid(ncid, "y", &lat_in_id)))
ERR(retval);
if ((retval = nc_inq_varid(ncid, "x", &lon_in_id)))
ERR(retval);
if ((retval = nc_inq_varid(ncid, "z", &z_in_id)))
ERR(retval);
if ((retval = nc_get_var_double(ncid, lat_in_id, &latitude_input[0])))
ERR(retval);
if ((retval = nc_get_var_double(ncid, lon_in_id, &longitude_input[0])))
ERR(retval);
if ((retval = nc_get_var_int(ncid, z_in_id, &z_input[0][0])))
ERR(retval);
if ((retval = nc_close(ncid)))
ERR(retval);
}
// setting the unfiltered orography
int lat_index, lon_index;
double sigma_mountain = MOUNTAIN_FWHM/pow(8*log(2), 0.5); // only for oro_id == 1
double distance;
#pragma omp parallel for private(distance, lat_index, lon_index)
for (int i = 0; i < NO_OF_SCALARS_H; ++i)
{
// default
oro[i] = 0;
oro_unfiltered[i] = 0;
if (oro_id == 1)
{
distance = calculate_distance_h(latitude_scalar[i], longitude_scalar[i], 0, 0, RADIUS);
oro[i] = MOUNTAIN_HEIGHT*exp(-pow(distance, 2)/(2*pow(sigma_mountain, 2)));
}
// real orography can only be different from zero at land points
if (oro_id == 2 && is_land[i] == 1)
{
double *lat_distance_vector = malloc(no_of_lat_points*sizeof(double));
double *lon_distance_vector = malloc(no_of_lon_points*sizeof(double));
for (int j = 0; j < no_of_lat_points; ++j)
{
lat_distance_vector[j] = fabs(deg2rad(latitude_input[j]) - latitude_scalar[i]);
}
for (int j = 0; j < no_of_lon_points; ++j)
{
lon_distance_vector[j] = fabs(deg2rad(longitude_input[j]) - longitude_scalar[i]);
}
lat_index = find_min_index(lat_distance_vector, no_of_lat_points);
lon_index = find_min_index(lon_distance_vector, no_of_lon_points);
oro_unfiltered[i] = z_input[lat_index][lon_index];
// check
if (oro_unfiltered[i] < -382 || oro_unfiltered[i] > 8850)
{
printf("Warning: value out of usual range.\n");
}
// freeing the memory
free(lat_distance_vector);
free(lon_distance_vector);
}
}
free(z_input);
free(latitude_input);
free(longitude_input);
// smoothing the real orography
int no_of_avg_points = 8;
int min_indices_vector[no_of_avg_points];
double distance_vector[NO_OF_SCALARS_H];
if (oro_id == 2)
{
#pragma omp parallel for private(min_indices_vector, distance_vector)
for (int i = 0; i < NO_OF_SCALARS_H; ++i)
{
// finding the distance to the other grid points
for (int j = 0; j < NO_OF_SCALARS_H; ++j)
{
distance_vector[j] = calculate_distance_h(latitude_scalar[i], longitude_scalar[i], latitude_scalar[j], longitude_scalar[j], 1);
}
for (int j = 0; j < no_of_avg_points; ++j)
{
min_indices_vector[j] = -1;
}
for (int j = 0; j < no_of_avg_points; ++j)
{
min_indices_vector[j] = find_min_index_exclude(distance_vector, NO_OF_SCALARS_H, min_indices_vector, no_of_avg_points);
}
oro[i] = 0;
for (int j = 0; j < no_of_avg_points; ++j)
{
oro[i] += oro_unfiltered[min_indices_vector[j]]/no_of_avg_points;
}
}
}
free(oro_unfiltered);
printf("minimum orography: %lf m\n", oro[find_min_index(oro, NO_OF_SCALARS_H)]);
printf("maximum orography: %lf m\n", oro[find_max_index(oro, NO_OF_SCALARS_H)]);
double c_p_water = 4184.0;
double c_p_soil = 830.0;
double albedo_water = 0.06;
// setting the land surface albedo to 0.12 (compare Zdunkowski, Trautmann & Bott:
// Radiation in the Atmosphere, 2007, p. 444)
double albedo_soil = 0.12;
double density_soil = 1442.0;
double t_conductivity_soil = 7.5e-7;
#pragma omp parallel for
for (int i = 0; i < NO_OF_SCALARS_H; ++i)
{
// ocean
sfc_albedo[i] = albedo_water;
sfc_rho_c[i] = RHO_WATER*c_p_water;
roughness_length[i] = 0.08;
// for water this is set to some land-typical value, will not be used anyway
t_conductivity[i] = t_conductivity_soil;
// land
if (is_land[i] == 1)
{
sfc_albedo[i] = albedo_soil;
sfc_rho_c[i] = density_soil*c_p_soil;
roughness_length[i] = 0.2;
}
}
return 0;
}
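/*
A short sketch of the FWHM-to-sigma conversion used for the idealized mountain
above (oro_id == 1): the Gaussian h(d) = H*exp(-d^2/(2*sigma^2)) falls to H/2
at d = FWHM/2, which yields sigma = FWHM/sqrt(8*ln(2)). The helper name is
illustrative only.
*/
double mountain_height_at_distance(double distance)
{
	double sigma_mountain = MOUNTAIN_FWHM/pow(8*log(2), 0.5);
	return MOUNTAIN_HEIGHT*exp(-pow(distance, 2)/(2*pow(sigma_mountain, 2)));
}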
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN(16) int8_t i8[16];
SIMDE_ALIGN(16) int16_t i16[8];
SIMDE_ALIGN(16) int32_t i32[4];
SIMDE_ALIGN(16) int64_t i64[2];
SIMDE_ALIGN(16) uint8_t u8[16];
SIMDE_ALIGN(16) uint16_t u16[8];
SIMDE_ALIGN(16) uint32_t u32[4];
SIMDE_ALIGN(16) uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128[1];
SIMDE_ALIGN(16) simde_uint128 u128[1];
#endif
SIMDE_ALIGN(16) simde_float32 f32[4];
SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN(16) simde__m64_private m64_private[2];
SIMDE_ALIGN(16) simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN(16) __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) int8x16_t neon_i8;
SIMDE_ALIGN(16) int16x8_t neon_i16;
SIMDE_ALIGN(16) int32x4_t neon_i32;
SIMDE_ALIGN(16) int64x2_t neon_i64;
SIMDE_ALIGN(16) uint8x16_t neon_u8;
SIMDE_ALIGN(16) uint16x8_t neon_u16;
SIMDE_ALIGN(16) uint32x4_t neon_u32;
SIMDE_ALIGN(16) uint64x2_t neon_u64;
SIMDE_ALIGN(16) float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN(16) float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN(16) v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
        vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
        vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
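/* Usage sketch (illustrative only): because the accessors above fall back to
 * fenv.h where no native SSE control word exists, rounding-mode save/restore
 * written against the SSE API stays portable, e.g.
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   // ... rounding-sensitive work ...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */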
#if !defined(__x86_64__) || !defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_round_ps (simde__m128 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
      defined(SIMDE_ARM_NEON_A32V8_NATIVE)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndaq_f32(a_.neon_f32);
#elif defined(simde_math_roundf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundf(a_.f32[i]);
}
#else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
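/* Illustration (ours, not part of SIMDe) of why the SSE2 path above goes
 * through integers: comparing a register to itself with an *integer*
 * compare yields all-ones even for NaN payloads,
 *
 *   __m128i ones = _mm_cmpeq_epi32(x, x);      // 0xFFFFFFFF in every lane
 *   __m128i notx = _mm_andnot_si128(x, ones);  // ~x
 *
 * whereas a float cmpeq would return 0 for NaN lanes. */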
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
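/* Usage sketch (ours): a comparison result satisfies the "all bits 0 or
 * all bits ~0" contract above, so a branch-free lane-wise minimum is
 *
 *   simde__m128 mask = simde_mm_cmplt_ps(a, b);           // ~0 where a < b
 *   simde__m128 min  = simde_x_mm_select_ps(b, a, mask);  // a where a < b, else b
 */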
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
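/* Note (ours): the widening path above computes the rounding average
 * (a + b + 1) >> 1 in 32-bit lanes, so uint16 inputs cannot overflow;
 * e.g. avg(65535, 65535) = (65535 + 65535 + 1) >> 1 = 65535. */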
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
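/* Note: the cmpn*_ps/ss wrappers below forward to the opposite *ordered*
 * comparison (e.g. cmpnge -> cmplt). Intel's NOT-comparisons return ~0
 * for unordered (NaN) lanes while the ordered forms return 0, so results
 * differ from native SSE when NaNs are involved. */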
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON has no ordered-compare builtin.
   Compare a == a and b == b to detect NaN lanes,
   then AND the results to form the final mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
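/* Semantics sketch (ours): cmpord produces ~0 only where *both* lanes
 * are non-NaN; cmpunord below is its complement. Lane-wise:
 *
 *   cmpord(1.0f, 2.0f) -> ~UINT32_C(0)
 *   cmpord(1.0f,  NAN) ->  UINT32_C(0)
 */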
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
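/* Scalar sketch (ours) of the SIMDE_IEEE754_STORAGE branch above: with
 * sign = UINT32_C(0x80000000),
 *
 *   r = dest ^ ((dest ^ src) & sign);
 *
 * keeps dest's bits everywhere except the sign bit, which comes from
 * src, e.g. copysign(3.0f, -1.0f) -> -3.0f. */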
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
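/* Sketch (ours): xorsign flips dest's sign wherever src is negative,
 * i.e. r = dest ^ (src & -0.0f) bitwise; for finite inputs this equals
 * dest * copysign(1, src) lane-wise. */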
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */
r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0))));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
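/* Behaviour sketch (ours): inputs are clamped to [INT8_MIN, INT8_MAX]
 * before the round-and-narrow, e.g. 200.0f -> 127 and -300.0f -> -128.
 * Only the low four i8 lanes are meaningful; the upper half is
 * undefined, as noted above. */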
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvttss_si32((a))
#endif
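/* Contrast (ours), assuming the default round-to-nearest mode:
 *
 *   simde_mm_cvt_ss2si (simde_mm_set1_ps(1.7f))  -> 2  (current rounding)
 *   simde_mm_cvtt_ss2si(simde_mm_set1_ps(1.7f))  -> 1  (truncation)
 */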
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps1 (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
   for mem_addr. Unfortunately it uses the __m64 type for the argument,
   which is supposed to be 8-byte aligned, so some compilers (like clang
   with -Wcast-align) will generate a warning if you try to cast, say,
   a simde_float32* to a simde__m64* for this function.
   I think the choice of argument type is unfortunate, but I do think we
   need to stick to it here. If there is demand I can always add something
   like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0
/* TODO: XLC documentation has it, but it doesn't seem to work.
* More investigation is necessary. */
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#else
r_.f32[0] = mem_addr[0];
r_.f32[1] = mem_addr[1];
r_.f32[2] = mem_addr[2];
r_.f32[3] = mem_addr[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
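/* Semantics sketch (ours): a byte of a is stored only where the
 * corresponding mask byte has its high bit set (mask_.i8[i] < 0);
 * all other bytes of mem_addr are left untouched. */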
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
/* Isolate each byte's sign bit and shift it down to bit position i
 * (negative vshl_u8 counts are right shifts), so that summing the
 * lanes assembles the movemask one bit per byte. */
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
r = vaddv_u8(mask_result);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
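/* Example (illustrative, not part of the original header): movemask packs
 * the sign bit of each 32-bit lane into the low four bits of the result.
 *
 *   simde__m128 v = simde_mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f);
 *   int m = simde_mm_movemask_ps(v);  // lanes 0 and 2 negative -> m == 0x5
 */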
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
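/* Note: this portable fallback can only honor the prefetch request itself;
 * the locality hint (i) is dropped except where a native hint exists. */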
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
# define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
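/* A sketch of the integer reciprocal trick used above (approximate, for
 * reference only): for IEEE-754 storage, bits(1/x) is roughly K - bits(x)
 * with K = 0x7EF311C3, and one Newton-Raphson step t <- t*(2 - t*x)
 * sharpens the estimate. For x = 2.0f the seed is ~0.4747 and the refined
 * value is ~0.4987, i.e. within about 2e-3 of the exact 0.5f. */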
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
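/* A sketch of the accuracy trade-off above (values approximate):
 * SIMDE_ACCURACY_PREFERENCE selects both the seed constant (0x5F37624F,
 * 0x5F375A82, or 0x5F37599E, all variants of the classic fast
 * inverse-square-root magic number) and how many Newton-Raphson steps
 * x <- x*(1.5008909 - xhalf*x*x) run (zero, one, or two). For a = 4.0f
 * the seed alone gives ~0.483; one refinement gives ~0.4996 (exact: 0.5). */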
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
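/* Example (illustrative): the sum of absolute byte differences lands in
 * the lowest 16-bit lane and the upper three lanes are zeroed, so
 * a = {0,1,...,7} and b = {1,2,...,8} yield u16 lanes {8, 0, 0, 0}. */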
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
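/* Worked example (illustrative): the macro packs four 2-bit lane selectors
 * into an imm8, most-significant selector first.
 *
 *   SIMDE_MM_SHUFFLE(3, 2, 1, 0) == 0xE4  // identity selection
 *   SIMDE_MM_SHUFFLE(0, 1, 2, 3) == 0x1B  // full reversal
 *
 * In simde_mm_shuffle_ps(a, b, imm8) below, the two low selectors pick
 * lanes of a and the two high selectors pick lanes of b. */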
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[0];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store1_ps(mem_addr, a);
#else
simde_mm_store_ps1(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2);
dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3);
#else
dest_->f32[0] = a_.f32[2];
dest_->f32[1] = a_.f32[3];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32);
#else
simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
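/* Usage sketch (illustrative values):
 *
 *   simde__m128 r0 = simde_mm_set_ps( 3.0f,  2.0f,  1.0f,  0.0f);
 *   simde__m128 r1 = simde_mm_set_ps( 7.0f,  6.0f,  5.0f,  4.0f);
 *   simde__m128 r2 = simde_mm_set_ps(11.0f, 10.0f,  9.0f,  8.0f);
 *   simde__m128 r3 = simde_mm_set_ps(15.0f, 14.0f, 13.0f, 12.0f);
 *   SIMDE_MM_TRANSPOSE4_PS(r0, r1, r2, r3);
 *   // r0 == {0, 4, 8, 12}, r1 == {1, 5, 9, 13}, etc.: the 4x4 matrix of
 *   // row vectors has been transposed in place.
 */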
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
GB_unop__identity_int32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fc64)
// op(A') function: GB (_unop_tran__identity_int32_fc64)
// C type: int32_t
// A type: GxB_FC64_t
// cast: int32_t cij = GB_cast_to_int32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC64)
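// For example (illustrative): aij = 3.7 - 2.0i casts to cij = 3, since only
// the real part is kept and the int32 cast truncates toward zero.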
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int32_fc64)
(
int32_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int32_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
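/// Usage sketch (illustrative; \c NullMap is a hypothetical instance):
/// \code
///   FileNullability &FN = NullMap[FID]; // first lookup fills the cache
///   FN.SawTypeNullability = true;       // later NullMap[FID] hits the cache
/// \endcode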
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
return isVisible(Old) || New->isExternallyVisible();
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
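// Illustrative use of a PragmaStack (a sketch; `Loc` stands for the
// location of the pragma being acted on):
//
//   PragmaStack<unsigned> Stack(/*Default=*/8);
//   Stack.Act(Loc, PSK_Push_Set, /*StackSlotLabel=*/"", /*Value=*/4);
//   // Stack.CurrentValue is now 4; the old value (8) is on the stack.
//   Stack.Act(Loc, PSK_Pop, /*StackSlotLabel=*/"", /*Value=*/0);
//   // Stack.CurrentValue should be back to the pushed value (8).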
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
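// Illustrative use (a sketch; the parser would typically create one of
// these when entering a C++ method body, with a label of its choosing):
//
//   {
//     PragmaStackSentinelRAII Sentinel(S, "InternalPragmaState",
//                                      /*ShouldAct=*/IsCXXMethodBody);
//     // ... parse the body; pragma stack changes made inside are
//     // restored when Sentinel is destroyed (if ShouldAct is true) ...
//   }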
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursion.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of the translation
/// unit.
///
/// This list maps class members to the locations of delete-expressions for
/// which it could not yet be proven whether they mismatch the
/// new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file-scoped decls seen so far that have not been used,
/// for which we must warn if they remain unused. Only contains the first
/// declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
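// Illustrative push/pop pairing (a sketch; `S` stands for the enclosing
// Sema instance, and the pool construction is assumed to parent-link to
// the current pool):
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... process a declaration; access/deprecation diagnostics are
//   // collected in Pool rather than emitted ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // The caller then decides whether and where to emit Pool's contents.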
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
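// Illustrative use (a sketch; `S` is the enclosing Sema and `RD` some
// DeclContext, e.g. a record, that we temporarily enter):
//
//   {
//     ContextRAII SavedContext(S, RD); // S.CurContext switches to RD
//     // ... build declarations inside RD ...
//   } // the destructor restores the previous S.CurContext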
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
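// Illustrative use (a sketch; `S` is the enclosing Sema and `FD` the
// function whose body is being synthesized):
//
//   {
//     SynthesizedFunctionScope Scope(S, FD);
//     // ... build the implicit body; expressions here are treated as
//     // potentially evaluated, with FD as the declaration context ...
//   }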
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers to define functions that occur in multiple standards to call the
/// version in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to look up file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// \brief The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// \brief The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentCleanup(ParentCleanup),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
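// Illustrative use (a sketch; `MD` stands for a non-null method produced
// by special-member overload resolution):
//
//   SpecialMemberOverloadResult R(MD); // kind derived from MD->isDeleted()
//   if (R.getKind() == SpecialMemberOverloadResult::Success)
//     (void)R.getMethod();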
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
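// Illustrative use (a sketch): instantiate on entry to a compound
// statement so that any FP_CONTRACT state change inside it is undone on
// exit:
//
//   {
//     FPContractStateRAII FPState(S);
//     // ... parse the compound statement ...
//   } // S.FPFeatures is restored here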
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
// no-op in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
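// Illustrative use (a sketch; the diagnostic ID and operands are shown
// for illustration only):
//
//   S.Diag(Loc, SomeDiagID) << LHS->getType() << RHS->getType()
//                           << LHS->getSourceRange() << RHS->getSourceRange();
//
// The returned SemaDiagnosticBuilder emits the diagnostic (plus any
// template instantiation stack) when it goes out of scope.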
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
VisibleModuleSet VisibleModules;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
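// Illustrative use of the variadic overload (a sketch; the diagnostic ID
// and the extra argument are hypothetical and are forwarded to the
// diagnostic via BoundTypeDiagnoser):
//
//   if (S.RequireCompleteType(Loc, T, SomeDiagID, SomeDeclName))
//     return true; // T was incomplete; a diagnostic has been emitted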
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;
NamedDecl *Previous;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
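// Illustrative construction (a sketch; `TName` stands for a TemplateName
// produced by lookup). The static factories build each classification kind
// without exposing the private constructor:
//
//   NameClassification C = NameClassification::TypeTemplate(TName);
//   if (C.getKind() == NC_TypeTemplate)
//     (void)C.getTemplateName(); // asserts that the kind matches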
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Module, ///< 'module X;'
Partition, ///< 'module partition X;'
Implementation, ///< 'module implementation X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// \brief We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// \brief We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \c ActOnObjCReenterContainerContext.
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy,
bool EnumUnderlyingIsImplicit,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// \brief Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
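// For instance (a sketch, not part of the original header): given
// 'void f(int);', declaring 'void f(double);' is Ovl_Overload, while
// re-declaring 'void f(int);' is Ovl_Match, and a prior 'int f;' makes
// the lookup result Ovl_NonFunction.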
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
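// For instance (an illustrative sketch of the attribute itself):
//   void f(int n)
//       __attribute__((diagnose_if(n < 0, "n must be non-negative", "error")));
// references a parameter, so it is argument-dependent and is checked at
// each direct call to f.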
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const FunctionDecl *Function,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
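///
/// A hedged usage sketch (assumes a Sema &SemaRef and a Scope *S are in
/// hand; not a verbatim excerpt from any caller):
/// \code
///   LookupResult R(SemaRef, Name, Loc, Sema::LookupOrdinaryName);
///   if (SemaRef.LookupName(R, S) && R.isSingleResult())
///     NamedDecl *D = R.getFoundDecl();
/// \endcode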
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
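// For example (an illustrative sketch): for the literal 123_x, a cooked
// operator such as 'T operator""_x(unsigned long long)' yields LOLR_Cooked,
// whereas a raw operator 'T operator""_x(const char *)' receiving the
// spelling "123" yields LOLR_Raw.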
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
/// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
///
/// The boolean value will be true to indicate that the namespace was loaded
/// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
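// An illustrative filter (a sketch under the documented contract, not a
// real caller): accept only rebuilt expressions of integral type, so that
// other correction combinations keep being tried otherwise:
//   ExprResult R = CorrectDelayedTyposInExpr(E, nullptr,
//       [](Expr *Rebuilt) -> ExprResult {
//         if (Rebuilt->getType()->isIntegerType())
//           return Rebuilt;
//         return ExprError(); // reject; try other corrections
//       });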
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \returns true if nullability cannot be applied, false otherwise.
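///
/// For example (illustrative only): '- (void)setName:(nonnull NSString *)n;'
/// uses the context-sensitive spelling, whereas 'NSString * _Nonnull n;'
/// uses the underscored type specifier.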
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive,
bool allowArrayTypes);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// null. On success, it also returns the ivar's property via \p PDecl.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief Returns instance or factory methods in the global method pool for
/// the given selector. It checks the desired kind first; if none is found and
/// the parameter \p CheckTheOther is set, it then checks the other kind. If
/// no such method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
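/// Illustrative sketch (assumes a Sema instance 'S'): the RAII object pairs
/// the start/finish callbacks automatically.
/// \code
///   {
///     Sema::CompoundScopeRAII CompoundScope(S);
///     // ... act on the statements of the block ...
///   } // ActOnFinishOfCompoundStmt() runs here.
/// \endcode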
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
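/// Illustrative sketch (assumes a Sema instance 'S'): the function scope is
/// popped on every early exit unless disable() is called first.
/// \code
///   Sema::FunctionScopeRAII FuncScope(S);
///   if (SomethingFailed)          // hypothetical error path
///     return StmtError();         // scope popped here automatically
///   FuncScope.disable();          // success: the caller pops the scope
/// \endcode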
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
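/// Illustrative sketch: probing whether a range-based for could be built
/// before committing to it (all names besides the BFRK_* kinds are
/// hypothetical):
/// \code
///   StmtResult Probe = S.ActOnCXXForRangeStmt(CurScope, ForLoc, CoawaitLoc,
///                                             LoopVar, ColonLoc, Collection,
///                                             RParenLoc, Sema::BFRK_Check);
///   if (!Probe.isInvalid()) {
///     // A real build would repeat the call with Sema::BFRK_Build.
///   }
/// \endcode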
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowParamOrMoveConstructible);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowParamOrMoveConstructible);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
llvm::InlineAsmIdentifierInfo &Info,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and the body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void EmitAvailabilityWarning(AvailabilityResult AR, NamedDecl *D,
StringRef Message, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// \brief Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
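/// Illustrative sketch: a dry-run capturability check using the
/// BuildAndDiagnose=false mode documented above (names are hypothetical):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool CannotCapture = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode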
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
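/// Illustrative sketch: the components describing __builtin_offsetof(T, a.b)
/// would be two '.ident' entries ('Context' is an assumed ASTContext;
/// source locations are elided):
/// \code
///   Sema::OffsetOfComponent A, B;
///   A.isBrackets = B.isBrackets = false;       // '.ident', not '[expr]'
///   A.U.IdentInfo = &Context.Idents.get("a");
///   B.U.IdentInfo = &Context.Idents.get("b");
///   Sema::OffsetOfComponent Components[] = {A, B};
/// \endcode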
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
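/// Illustrative sketch of dispatching on the result of an __if_exists check
/// ('S' and the lookup inputs are assumed to be in scope):
/// \code
///   switch (S.CheckMicrosoftIfExistsSymbol(CurScope, SS, TargetNameInfo)) {
///   case Sema::IER_Exists:       /* enter the __if_exists block */ break;
///   case Sema::IER_DoesNotExist: /* skip the block */              break;
///   case Sema::IER_Dependent:    /* defer until instantiation */   break;
///   case Sema::IER_Error:        /* already diagnosed */           break;
///   }
/// \endcode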
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList,
UsingDirectiveDecl * &UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
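/// Illustrative sketch combining the two queries above (names are
/// hypothetical):
/// \code
///   QualType Element;
///   if (S.isStdInitializerList(Ty, &Element)) {
///     // Ty is std::initializer_list<Element>; rebuild it if needed:
///     QualType InitListTy = S.BuildStdInitializerList(Element, Loc);
///   }
/// \endcode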
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
bool HasTypenameKeyword,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
SourceLocation EllipsisLoc,
AttributeList *AttrList,
bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
SourceLocation EllipsisLoc,
AttributeList *AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then throw(<collected exceptions>).
// Finally, no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any".
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
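/// Illustrative sketch: folding each called subobject member into the
/// computed specification (the collection being iterated is hypothetical):
/// \code
///   Sema::ImplicitExceptionSpecification Spec(S);
///   for (const CXXMethodDecl *Subobject : SubobjectMembersToCall)
///     Spec.CalledDecl(Loc, Subobject);
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode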
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
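/// Illustrative sketch ('Call' is an assumed, freshly-built call expression):
/// \code
///   ExprResult Bound = S.MaybeBindToTemporary(Call);
///   // 'Bound' wraps 'Call' in a CXXBindTemporaryExpr only when the result
///   // type has a non-trivial destructor; otherwise it is 'Call' unchanged.
/// \endcode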
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse the 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
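/// Illustrative sketch: temporarily permitting 'this' while processing a
/// member function's delayed exception specification (names and the exact
/// qualifier accessor are hypothetical):
/// \code
///   Sema::CXXThisScopeRAII ThisScope(S, Method->getParent(),
///                                    Method->getTypeQualifiers());
///   ExprResult NoexceptArg = S.ActOnFinishFullExpr(E);
/// \endcode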
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
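/// For example (illustrative):
/// \code
/// int x;
/// void f() { ::x = 0; }  // '::' here is the global scope specifier
/// \endcode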
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
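/// For example (illustrative; valid under -fms-extensions):
/// \code
/// struct B { void f(); };
/// struct D : B { void g() { __super::f(); } };
/// \endcode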
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// \brief Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// \brief The type of the object, if we're parsing a nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// \brief The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// \brief The location of the identifier.
SourceLocation IdentifierLoc;
/// \brief The location of the '::'.
SourceLocation CCLoc;
/// \brief Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
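/// For example, parsing \c A::B::x invokes this for 'A::' and then 'B::'
/// (an illustrative sketch):
/// \code
/// namespace A { namespace B { extern int x; } }
/// int y = A::B::x;
/// \endcode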
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
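/// ActOnCXXNestedNameSpecifierDecltype - Parsed a nested-name-specifier
/// that begins with a decltype-specifier, e.g. (illustrative):
/// \code
/// template<typename T> void f(T t) { typename decltype(t)::type v; }
/// \endcode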
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
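/// For example, 'Outer<int>::' below is handled here (illustrative):
/// \code
/// template<typename T> struct Outer { struct Inner { static int x; }; };
/// int n = Outer<int>::Inner::x;
/// \endcode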
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture, applying
/// any implicit conversions such as an lvalue-to-rvalue conversion if the
/// capture is not being used to initialize a reference.
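/// For example (illustrative; C++14 init-capture):
/// \code
/// int n = 0;
/// auto f = [m = n + 1] { return m; };  // 'm = n + 1' is the init-capture
/// \endcode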
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// \brief Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::LambdaScopeInfo::Capture &From);
/// \brief Diagnose if an explicit lambda capture is unused.
void DiagnoseUnusedLambdaCapture(const sema::LambdaScopeInfo::Capture &From);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to set up the conversion, and
/// IR generation actually generates the real body of the function pointer
/// conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
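/// e.g. (illustrative):
/// \code
/// SEL s = @selector(setObject:forKey:);
/// \endcode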
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
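/// e.g. (illustrative):
/// \code
/// Protocol *p = @protocol(NSCopying);
/// \endcode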
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
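/// ActOnStartLinkageSpecification - Parsed the beginning of a linkage
/// specification, e.g. (illustrative; 'c_function' is a placeholder):
/// \code
/// extern "C" { void c_function(void); }
/// \endcode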
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
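/// ActOnMemInitializer - Handle a constructor mem-initializer, e.g. the
/// 'Base(1)' and 'x(0)' pieces of (an illustrative sketch):
/// \code
/// struct Base { Base(int); };
/// struct D : Base { int x; D() : Base(1), x(0) {} };
/// \endcode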
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// \brief Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
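/// ActOnStaticAssertDeclaration - Parsed a static_assert declaration,
/// e.g. (illustrative):
/// \code
/// static_assert(sizeof(void *) >= 4, "pointer size too small");
/// \endcode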
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
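/// For example (illustrative):
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; };  // D* is covariant with B*
/// \endcode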
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// specification of the overriding function is a subset of the base
/// function's exception specification.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
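/// For example (illustrative):
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); };  // error: overrides 'final' function
/// \endcode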
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
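/// For example, 'Container' below names a deduction guide (illustrative):
/// \code
/// template<typename T> struct Container { Container(T); };
/// template<typename T> Container(T) -> Container<T>;
/// \endcode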
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<Decl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
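///
/// For example (illustrative):
/// \code
/// template<typename T> void f() { typename T::type v; }
/// \endcode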
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
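///
/// For example, this diagnoses the unexpanded pack 'Ts' in (illustrative):
/// \code
/// template<typename ...Ts> struct X { Ts t; };  // error: 'Ts' unexpanded
/// \endcode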
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
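///
/// For example (illustrative), the base-specifier 'Ts...' below:
/// \code
/// template<typename ...Ts> struct X : Ts... {};
/// \endcode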
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// \brief After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// \brief CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
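// A minimal usage sketch for handling a deduction result (illustrative only;
// S, Partial, TemplateArgs, and Loc are assumed caller-side values):
//
//   sema::TemplateDeductionInfo Info(Loc);
//   Sema::TemplateDeductionResult TDK =
//       S.DeduceTemplateArguments(Partial, TemplateArgs, Info);
//   if (TDK != Sema::TDK_Success) {
//     // Info carries the failure details (e.g., the inconsistent argument).
//   }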
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// \brief Substitute \p Replacement for \c auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute \p Replacement for \c auto in \p TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
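// A hedged sketch of auto deduction (TSI and Init are assumed to be the
// declared type and initializer supplied by the caller; Init must be an
// lvalue of type Expr* because the parameter is an Expr*&):
//
//   QualType DeducedType;
//   if (S.DeduceAutoType(TSI, Init, DeducedType) == Sema::DAR_Succeeded) {
//     // DeducedType now holds the type with 'auto' replaced; Init may have
//     // been updated by the deduction process.
//   }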
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// \brief Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// \brief The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArgs provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provide the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
} Kind;
/// \brief Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// \brief The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// \brief The entity that is being synthesized.
Decl *Entity;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// \brief List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// \brief The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
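// A minimal RAII sketch (S is the current Sema instance; I is an assumed
// valid element index within the pack being expanded):
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     // Substitutions performed in this scope use the I-th pack element.
//   } // The previous substitution index is restored here.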
friend class ArgumentPackSubstitutionRAII;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// \brief Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
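// A hedged sketch of the instantiation-stack protocol (PointOfInstantiation
// and Entity are assumed caller-side values):
//
//   Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return; // Depth limit exceeded; an error has already been emitted.
//   if (Inst.isAlreadyInstantiating())
//     return; // This specialization is already on the active stack.
//   // ... perform the instantiation; the destructor pops the context.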
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
}
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
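// A minimal sketch of querying the SFINAE state (note the two levels: the
// Optional distinguishes "not a SFINAE context" from "SFINAE context with no
// deduction info", where the contained pointer may be null):
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = S.isSFINAEContext()) {
//     if (*Info) {
//       // Capture suppressed diagnostics into the deduction info object.
//     }
//   }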
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
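// A minimal sketch of trapping errors during provisional checking (E and
// TemplateArgs are assumed caller-side values; the substituted operation is
// illustrative):
//
//   Sema::SFINAETrap Trap(S);
//   ExprResult R = S.SubstExpr(E, TemplateArgs);
//   if (Trap.hasErrorOccurred()) {
//     // Report a substitution failure instead of surfacing hard errors.
//   }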
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
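// A hedged sketch: probe whether a construct would be valid without
// committing to diagnostics or typo correction in the immediate context.
//
//   {
//     Sema::TentativeAnalysisScope Tentative(S);
//     // Provisional analysis here; suppression ends at scope exit.
//   }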
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
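// A minimal sketch (Enabled is chosen by the caller; when true, the current
// queues are swapped out so instantiations performed here stay isolated; both
// queues must be drained before the destructor swaps them back):
//
//   {
//     Sema::SavePendingInstantiationsAndVTableUsesRAII Saved(S, true);
//     // ... trigger instantiations ...
//     S.DefineUsedVTables();            // declared elsewhere in this class
//     S.PerformPendingInstantiations();
//   } // Saved queues are restored at scope exit.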
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
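// A minimal usage sketch (the index and the consumed-ness are illustrative;
// EPI is an assumed FunctionProtoType::ExtProtoInfo being filled in):
//
//   Sema::ExtParameterInfoBuilder InfoBuilder;
//   InfoBuilder.set(
//       0, FunctionProtoType::ExtParameterInfo().withIsConsumed(true));
//   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
//   // getPointerOrNull returns nullptr when nothing interesting was set.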
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - Called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// \brief Called on \#pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// \brief Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
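// A minimal sketch (T is an assumed QualType; the extension names are
// illustrative of the space-separated format):
//
//   S.setOpenCLExtensionForType(T, "cl_khr_fp64 cl_khr_fp16");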
/// \brief Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// \brief Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// \brief Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// \brief Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// \brief Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const Decl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// \brief Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *IsOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// \brief Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer);
/// \brief Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Called on the start of a declare target region, i.e. '#pragma omp declare
/// target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end
/// declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D);
/// Return true inside an OpenMP 'declare target' region.
bool isInOpenMPDeclareTargetContext() const {
return IsInOpenMPDeclareTargetContext;
}
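/// Example usage (an illustrative sketch, not part of this interface):
///
///   (void)ActOnStartOpenMPDeclareTargetDirective(Loc);
///   // ... parse declarations; isInOpenMPDeclareTargetContext() is true ...
///   ActOnFinishOpenMPDeclareTargetDirective();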
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
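/// Example usage (an illustrative sketch, not part of this interface;
/// 'AssocStmt' and 'Clauses' are hypothetical locals):
///
///   ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
///   // ... parse the associated statement into AssocStmt ...
///   StmtResult Dir = ActOnOpenMPRegionEnd(AssocStmt, Clauses);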
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// \brief Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// \brief Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// \brief Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result is an expression of the given value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
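/// Example usage (an illustrative sketch, not part of this interface;
/// 'E' is a hypothetical scalar expression being converted to bool):
///
///   CastKind CK = ScalarTypeToBooleanCastKind(E->getType());
///   ExprResult Converted = ImpCastExprToType(E, Context.BoolTy, CK);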
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
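/// Example usage (an illustrative sketch, not part of this interface) of the
/// typical binary-operator checking pattern; 'LHS' and 'RHS' are hypothetical
/// ExprResults:
///
///   QualType ResultTy = UsualArithmeticConversions(LHS, RHS);
///   if (LHS.isInvalid() || RHS.isInvalid())
///     return QualType();
///   if (ResultTy.isNull() || !ResultTy->isArithmeticType())
///     return InvalidOperands(Loc, LHS, RHS);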
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
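/// Example usage (an illustrative sketch, not part of this interface) of the
/// usual check-then-diagnose pattern; 'LHSType' and 'RHS' are hypothetical:
///
///   AssignConvertType ConvTy =
///       CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (RHS.isInvalid())
///     return ExprError();
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                RHS.get()->getType(), RHS.get(),
///                                AA_Assigning))
///     return ExprError();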
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// \brief Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
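/// Example usage (an illustrative sketch, not part of this interface;
/// 'CondExpr' and 'IfLoc' are hypothetical):
///
///   ConditionResult Cond =
///       ActOnCondition(CurScope, IfLoc, CondExpr, ConditionKind::Boolean);
///   if (Cond.isInvalid())
///     return StmtError();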
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or ExprError() if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Converts the given expression to bool;
/// returns ExprError() if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
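/// Example usage (an illustrative sketch, not part of this interface;
/// 'E', 'Value', and 'diag::err_not_ice_example' are hypothetical):
///
///   struct NotICEDiagnoser : VerifyICEDiagnoser {
///     void diagnoseNotICE(Sema &S, SourceLocation Loc,
///                         SourceRange SR) override {
///       S.Diag(Loc, diag::err_not_ice_example) << SR;
///     }
///   } Diagnoser;
///   ExprResult Converted =
///       VerifyIntegerConstantExpression(E, &Value, Diagnoser);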
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
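/// Example usage (an illustrative sketch, not part of this interface;
/// the diagnostic ID is hypothetical):
///
///   PushForceCUDAHostDevice();      // on the pragma's 'begin' form
///   // ...
///   if (!PopForceCUDAHostDevice())  // on the pragma's 'end' form
///     Diag(Loc, diag::err_unmatched_pragma_end_example);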
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const AttributeList *Attr);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// functions that do not match the current
// compilation mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// functions matching the current compilation mode.
CFP_Native, // Host-to-host or device-to-device calls.
};
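// Because the values are ordered worst-to-best, preferences can be compared
// directly. A minimal sketch (Best, A and B are illustrative names, not part
// of this header):
//   if (IdentifyCUDAPreference(Caller, A) > IdentifyCUDAPreference(Caller, B))
//     Best = A;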
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function that needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDecl.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartImpl(CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinMSVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether the given field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// \brief The diagnostic we should emit for \c D, or \c AR_Available.
///
/// \param D The declaration to check. Note that this may be altered to point
/// to another declaration that \c D gets its availability from, i.e., we
/// walk the list of typedefs to find an availability attribute.
///
/// \param Message If non-null, this will be populated with the message from
/// the availability attribute that is selected.
AvailabilityResult ShouldDiagnoseAvailabilityOfDecl(NamedDecl *&D,
std::string *Message);
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// If we are in code completion just after a comma, count the position
// being completed as an extra argument.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams;
return NumArgs > NumParams;
}
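// Worked example: with NumParams == 2, completing "f(a, b, <here>" gives
// NumArgs == 2 and PartialOverloading == true, so we compare 3 > 2 and
// report too many arguments.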
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
/// \brief Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// \brief Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// \brief Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// \brief Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// \brief This function checks whether the expression is in the set of
/// potentially misaligned members and is being converted to some pointer
/// type T with lower or equal alignment requirements; if so, it removes the
/// expression from the set. This is used when we do not want to diagnose
/// such misaligned access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// \brief This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList, nullptr, false);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
nsincompressible.h | //*****************************************************************************
// Title : src/equation/nsincompressible.h
// Author : Tanabe Yuta
// Date : 2021/10/07
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
namespace PANSLBM2 {
namespace NSin {
// Update the macroscopic values of NS for 2D
template<class T, template<class>class P>
void Macro(T &_rho, T &_ux, T &_uy, const T *_f0, const T *_f, int _idx) {
_rho = _f0[_idx];
_ux = T();
_uy = T();
for (int c = 1; c < P<T>::nc; ++c) {
T f = _f[P<T>::IndexF(_idx, c)];
_rho += f;
_ux += P<T>::cx[c]*f;
_uy += P<T>::cy[c]*f;
}
}
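// Note: in this (quasi-)incompressible model the velocity is the bare first
// moment of f (no division by rho), consistent with the equilibrium below.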
// Compute the equilibrium distribution of NS for 2D
template<class T, template<class>class P>
void Equilibrium(T *_feq, T _rho, T _ux, T _uy) {
T rhouu = _rho - 1.5*(_ux*_ux + _uy*_uy);
for (int c = 0; c < P<T>::nc; ++c) {
T ciu = P<T>::cx[c]*_ux + P<T>::cy[c]*_uy;
_feq[c] = P<T>::ei[c]*(3.0*ciu + 4.5*ciu*ciu + rhouu);
}
}
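// This is the standard D2Q9 BGK equilibrium with the density factored so it
// appears only in the constant term:
//     feq_i = w_i*(rho + 3*(c_i.u) + 4.5*(c_i.u)^2 - 1.5*|u|^2),
// where w_i = P<T>::ei[c] and the lattice sound speed satisfies cs^2 = 1/3.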
// Apply the external force of NS with the Brinkman model for 2D
template<class T, template<class>class P>
void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _alpha, T *_f, int _idx) {
T coef = 3.0*_alpha*_rho/(_rho + _alpha);
for (int c = 1; c < P<T>::nc; ++c) {
_f[P<T>::IndexF(_idx, c)] -= coef*P<T>::ei[c]*(P<T>::cx[c]*_ux + P<T>::cy[c]*_uy);
}
}
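// The Brinkman term damps flow in (porous) solid regions by relaxing the
// distribution toward zero velocity with strength _alpha. The coefficient
// coef = 3*alpha*rho/(rho + alpha) stays bounded: it is 0 for alpha = 0 and
// tends to 3*rho as alpha grows.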
// Update macroscopic values and collide for NS in 2D
template<class T, template<class>class P>
void MacroCollide(P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity, bool _issave = false) {
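// BGK relaxation: in lattice units the relaxation time is tau = 3*nu + 0.5,
// so omega = 1/tau, and iomega = 1 - omega weights the pre-collision value.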
T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int idx = 0; idx < _p.nxyz; ++idx) {
// Update macro
T rho, ux, uy;
Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
// Save macro values if needed
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
}
// Collide
Equilibrium<T, P>(feq, rho, ux, uy);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
int idxf = P<T>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Update macroscopic values, apply the external force (Brinkman model) and collide for NS in 2D
template<class T, template<class>class P>
void MacroBrinkmanCollide(P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity, const T *_alpha, bool _issave = false) {
T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int idx = 0; idx < _p.nxyz; ++idx) {
// Update macro
T rho, ux, uy;
Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
// External force with Brinkman model
ExternalForceBrinkman<T, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
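// Recompute the macroscopic values so that the saved and collided values
// include the Brinkman force contribution applied just above.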
Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
// Save macro values if needed
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
}
// Collide
Equilibrium<T, P>(feq, rho, ux, uy);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
int idxf = P<T>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Set the initial condition of NS for 2D
template<class T, template<class>class P>
void InitialCondition(P<T>& _p, const T *_rho, const T *_ux, const T *_uy) {
T feq[P<T>::nc];
for (int idx = 0; idx < _p.nxyz; ++idx) {
Equilibrium<T, P>(feq, _rho[idx], _ux[idx], _uy[idx]);
_p.f0[idx] = feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
_p.f[P<T>::IndexF(idx, c)] = feq[c];
}
}
}
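// Usage sketch (illustrative only; assumes a D2Q9-type particle class P that
// also provides a streaming step such as Stream(), plus user-supplied
// uxbc/uybc/bctype functors -- none of these are defined in this header):
//     P<double> p(nx, ny);
//     std::vector<double> rho(p.nxyz, 1.0), ux(p.nxyz, 0.0), uy(p.nxyz, 0.0);
//     NSin::InitialCondition(p, rho.data(), ux.data(), uy.data());
//     for (int t = 0; t < tmax; ++t) {
//         NSin::MacroCollide(p, rho.data(), ux.data(), uy.data(), nu, t == tmax - 1);
//         p.Stream();
//         NSin::BoundaryConditionSetU(p, uxbc, uybc, bctype);
//     }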
// Set the boundary condition of NS (prescribed velocity U) for 2D
template<class T, template<class>class P, class Fv0, class Fv1, class Ff>
void BoundaryConditionSetU(P<T>& _p, Fv0 _uxbc, Fv1 _uybc, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T ux = _uxbc(0 + _p.offsetx, j + _p.offsety), uy = _uybc(0 + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T ux = _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety), uy = _uybc((_p.nx - 1) + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T ux = _uxbc(i + _p.offsetx, 0 + _p.offsety), uy = _uybc(i + _p.offsetx, 0 + _p.offsety);
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T ux = _uxbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety), uy = _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety);
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
}
// Set the boundary condition of NS (prescribed density rho) for 2D
template<class T, template<class>class P, class Fv0, class Fv1, class Ff>
void BoundaryConditionSetRho(P<T>& _p, Fv0 _rhobc, Fv1 _usbc, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T ux = _rhobc(0 + _p.offsetx, j + _p.offsety) - (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 4)] + 2.0*(_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)]));
T uy = _usbc(0 + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T ux = -_rhobc((_p.nx - 1) + _p.offsetx, j + _p.offsety) + (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 4)] + 2.0*(_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)]));
T uy = _usbc((_p.nx - 1) + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T ux = _usbc(i + _p.offsetx, 0 + _p.offsety);
T uy = _rhobc(i + _p.offsetx, 0 + _p.offsety) - (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 3)] + 2.0*(_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)]));
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T ux = _usbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety);
T uy = -_rhobc(i + _p.offsetx, (_p.ny - 1) + _p.offsety) + (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 3)] + 2.0*(_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)]));
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
}
}
} |
sapG_fmt_plug.c | /*
* this is a SAP PASSCODE (CODVN G) plugin for john the ripper.
* tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
*
* please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
*
* (c) x7d8 sap loverz, public domain, btw
* cheers: see test-cases.
*
* Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
* encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapG;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapG);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#define FORMAT_LABEL "sapg"
#define FORMAT_NAME "SAP CODVN F/G (PASSCODE)"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
static unsigned int omp_t = 1;
#if defined(_OPENMP)
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define SALT_FIELD_LENGTH 40
#define USER_NAME_LENGTH 12 /* max. length of user name in characters */
#define SALT_LENGTH (USER_NAME_LENGTH * 4) /* bytes of UTF-8 */
#define PLAINTEXT_LENGTH 40 /* Characters */
#define UTF8_PLAINTEXT_LENGTH MIN(125, PLAINTEXT_LENGTH * 3) /* bytes */
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + 2*BINARY_SIZE) /* SALT + $ + 2x20 bytes for SHA1-representation */
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion
#define GETWORDPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define GETSTARTPOS(index) ( (index&(SIMD_COEF_32-1))*4 + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*20*SIMD_COEF_32 ) //for endianity conversion
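// These macros address interleaved SIMD buffers: each lane owns 4
// consecutive bytes, lanes repeat every SIMD_COEF_32*4 bytes, and the
// (3-((i)&3)) term reverses byte order within each 32-bit word to match
// SHA-1's big-endian input.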
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
//this array is from disp+work (sap's worker process)
#define MAGIC_ARRAY_SIZE 160
static const unsigned char theMagicArray[MAGIC_ARRAY_SIZE]=
{0x91, 0xAC, 0x51, 0x14, 0x9F, 0x67, 0x54, 0x43, 0x24, 0xE7, 0x3B, 0xE0, 0x28, 0x74, 0x7B, 0xC2,
0x86, 0x33, 0x13, 0xEB, 0x5A, 0x4F, 0xCB, 0x5C, 0x08, 0x0A, 0x73, 0x37, 0x0E, 0x5D, 0x1C, 0x2F,
0x33, 0x8F, 0xE6, 0xE5, 0xF8, 0x9B, 0xAE, 0xDD, 0x16, 0xF2, 0x4B, 0x8D, 0x2C, 0xE1, 0xD4, 0xDC,
0xB0, 0xCB, 0xDF, 0x9D, 0xD4, 0x70, 0x6D, 0x17, 0xF9, 0x4D, 0x42, 0x3F, 0x9B, 0x1B, 0x11, 0x94,
0x9F, 0x5B, 0xC1, 0x9B, 0x06, 0x05, 0x9D, 0x03, 0x9D, 0x5E, 0x13, 0x8A, 0x1E, 0x9A, 0x6A, 0xE8,
0xD9, 0x7C, 0x14, 0x17, 0x58, 0xC7, 0x2A, 0xF6, 0xA1, 0x99, 0x63, 0x0A, 0xD7, 0xFD, 0x70, 0xC3,
0xF6, 0x5E, 0x74, 0x13, 0x03, 0xC9, 0x0B, 0x04, 0x26, 0x98, 0xF7, 0x26, 0x8A, 0x92, 0x93, 0x25,
0xB0, 0xA2, 0x0D, 0x23, 0xED, 0x63, 0x79, 0x6D, 0x13, 0x32, 0xFA, 0x3C, 0x35, 0x02, 0x9A, 0xA3,
0xB3, 0xDD, 0x8E, 0x0A, 0x24, 0xBF, 0x51, 0xC3, 0x7C, 0xCD, 0x55, 0x9F, 0x37, 0xAF, 0x94, 0x4C,
0x29, 0x08, 0x52, 0x82, 0xB2, 0x3B, 0x4E, 0x37, 0x9F, 0x17, 0x07, 0x91, 0x11, 0x3B, 0xFD, 0xCD };
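// The CODVN G scheme as implemented below: h1 = SHA-1(password . username);
// h1 then selects a slice of theMagicArray (offset and length derived from
// h1, see extractOffsetToMagicArray / extractLengthOfMagicArray), and the
// final PASSCODE is SHA-1(password . magic_slice . username).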
// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
{"DDIC$6066CD3147915331EC4C602847D27A75EB3E8F0A", "DDIC"},
/*
* invalid IRL because the password is too short (it would work during login,
* but not during a password change). We use these tests anyway because
* they help verify key buffer cleaning:
*/
{"F $646A0AD270DF651065669A45D171EDD62DFE39A1", "X"},
{"JOHNNY $7D79B478E70CAAE63C41E0824EAB644B9070D10A", "CYBERPUNK"},
{"VAN$D15597367F24090F0A501962788E9F19B3604E73", "hauser"},
{"ROOT$1194E38F14B9F3F8DA1B181F14DEB70E7BDCC239", "KID"},
// invalid, because password is too short (would work during login, but not during password change):
{"MAN$22886450D0AB90FDA7F91C4F3DD5619175B372EA", "u"},
// SAP user name consisting of 12 consecutive EURO characters:
{"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
"$B20D15C088481780CD44FCF2003AAAFBD9710C7C", "--+----"},
{"SAP* $60A0F7E06D95BC9FB45F605BDF1F7B660E5D5D4E", "MaStEr"},
{"DOLLAR$$$---$E0180FD4542D8B6715E7D0D9EDE7E2D2E40C3D4D", "Dollar$$$---"},
{NULL}
};
static UTF8 (*saved_plain)[UTF8_PLAINTEXT_LENGTH + 1];
static int *keyLen;
#ifdef SIMD_COEF_32
// max intermediate crypt size is 256 bytes
// multiple key buffers for lengths > 55
#define LIMB 5
static unsigned char *saved_key[LIMB];
static unsigned char *crypt_key;
static unsigned char *interm_crypt;
static unsigned int *clean_pos;
#else
static UTF8 (*saved_key)[UTF8_PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
#endif
static struct saltstruct {
unsigned int l;
unsigned char s[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
static int warned = 0;
#ifdef SIMD_COEF_32
int i;
#endif
// This is needed in order NOT to upper-case the German double-s (ß)
// in UTF-8 mode.
initUnicode(UNICODE_MS_NEW);
if (!options.listconf && options.target_enc != UTF_8 &&
!(options.flags & FLG_TEST_CHK) && warned++ == 0)
fprintf(stderr, "Warning: SAP-F/G format should always be UTF-8.\n"
"Use --target-encoding=utf8\n");
// Max 40 characters or 125 bytes of UTF-8. We do not actually truncate
// multibyte input at 40 characters later because it's too expensive.
if (options.target_enc == UTF_8)
self->params.plaintext_length = UTF8_PLAINTEXT_LENGTH;
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
keyLen = mem_calloc(self->params.max_keys_per_crypt, sizeof(*keyLen));
#ifdef SIMD_COEF_32
clean_pos = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*clean_pos));
for (i = 0; i < LIMB; i++)
saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt,
SHA_BUF_SIZ * 4,
MEM_ALIGN_SIMD);
interm_crypt = mem_calloc_align(self->params.max_keys_per_crypt,
20, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
20, MEM_ALIGN_SIMD);
#else
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
saved_key = saved_plain;
#endif
}
static void done(void)
{
#ifdef SIMD_COEF_32
int i;
#endif
MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
MEM_FREE(interm_crypt);
for (i = 0; i < LIMB; i++)
MEM_FREE(saved_key[i]);
MEM_FREE(clean_pos);
#endif
MEM_FREE(keyLen);
MEM_FREE(saved_plain);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int i, j;
char *p;
if (!ciphertext) return 0;
p = strrchr(ciphertext, '$');
if (!p) return 0;
if (p - ciphertext > SALT_FIELD_LENGTH) return 0;
if (strlen(&p[1]) != BINARY_SIZE * 2) return 0;
j = 0;
for (i = 0; i < p - ciphertext; i++) {
// even lower-case non-ASCII characters that have a
// corresponding upper-case character could be rejected
if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z')
return 0;
else if (ciphertext[i] & 0x80)
j++;
// Reject if user name is longer than 12 characters.
// This is not accurate, but close enough.
// To be exact, I'd need to keep j unchanged for
// the first byte of each character, instead of
// incrementing j for every byte >= 0x80.
if (i >= USER_NAME_LENGTH + j && ciphertext[i] != ' ')
return 0;
}
// SAP user name cannot start with ! or ?
if (ciphertext[0] == '!' || ciphertext[0] == '?') return 0;
// the user name must not simply be spaces, or empty
for (i = 0; i < p - ciphertext; ++i) {
if (ciphertext[i] == ' ')
continue;
break;
}
if (ciphertext[i] == '$') return 0;
p++;
// SAP and sap2john.pl always use upper case A-F for hashes,
// so don't allow a-f
for (i = 0; i < BINARY_SIZE * 2; i++)
if (!(((p[i]>='0' && p[i]<='9')) ||
((p[i]>='A' && p[i]<='F')) ))
return 0;
return 1;
}
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void *get_salt(char *ciphertext)
{
char *p;
static struct saltstruct out;
p = strrchr(ciphertext, '$');
out.l = (int)(p - ciphertext);
memset(out.s, 0, sizeof(out.s));
memcpy(out.s, ciphertext, out.l);
return &out;
}
static void clear_keys(void)
{
memset(keyLen, 0, sizeof(*keyLen) * omp_t * MAX_KEYS_PER_CRYPT);
}
static void set_key(char *key, int index)
{
memcpy((char*)saved_plain[index], key, UTF8_PLAINTEXT_LENGTH);
keyLen[index] = -1;
}
static char *get_key(int index) {
return (char*)saved_plain[index];
}
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
unsigned int x,y=0;
#ifdef _OPENMP
for (;y<SIMD_PARA_SHA1*omp_t;y++)
#else
for (;y<SIMD_PARA_SHA1;y++)
#endif
for (x=0;x<SIMD_COEF_32;x++)
{
if ( ((unsigned int*)binary)[0] == ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5] )
return 1;
}
return 0;
#else
unsigned int index;
for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
if ( (((unsigned int*)binary)[0] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5]) |
(((unsigned int*)binary)[1] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+SIMD_COEF_32]) |
(((unsigned int*)binary)[2] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+2*SIMD_COEF_32]) |
(((unsigned int*)binary)[3] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+3*SIMD_COEF_32])|
(((unsigned int*)binary)[4] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+4*SIMD_COEF_32]) )
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/*
* Calculate the length of data that has to be hashed from the magic array.
* Pass the first hash result in here. This is part of the walld0rf-magic.
* The return value will always be between 32 and 82, inclusive.
*/
#if SIMD_COEF_32
inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray)
#endif
{
unsigned int modSum = 0;
#if SIMD_COEF_32
unsigned const char *p = &pbHashArray[GETOUTPOS(3, index)];
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
p += 4*(SIMD_COEF_32 - 1);
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
p += 4*(SIMD_COEF_32 - 1) + 2;
modSum += *p++ % 6;
modSum += *p % 6;
#else
unsigned int i;
for (i=0; i<=9; i++)
modSum += pbHashArray[i] % 6;
#endif
return modSum + 0x20; //0x20 is hardcoded...
}
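// Sanity check of the bound above: ten digest bytes each contribute at most
// 5 (mod 6), so modSum <= 50 and the result lies in [0x20, 0x20 + 50],
// i.e. [32, 82].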
/*
* Calculate the offset into the magic array. pass the first hash result in here
* part of the walld0rf-magic
* The return value will always be between 0 and 70, inclusive
*/
#if SIMD_COEF_32
inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray)
#endif
{
unsigned int modSum = 0;
#if SIMD_COEF_32
unsigned const int *p = (unsigned int*)&pbHashArray[GETOUTPOS(11, index)];
unsigned int temp;
temp = *p & 0x0707;
modSum += (temp >> 8) + (unsigned char)temp;
p += SIMD_COEF_32;
temp = *p & 0x07070707;
modSum += (temp >> 24) + (unsigned char)(temp >> 16) +
(unsigned char)(temp >> 8) + (unsigned char)temp;
p += SIMD_COEF_32;
temp = *p & 0x07070707;
modSum += (temp >> 24) + (unsigned char)(temp >> 16) +
(unsigned char)(temp >> 8) + (unsigned char)temp;
#else
unsigned int i;
for (i = 19; i >= 10; i--)
modSum += pbHashArray[i] % 8;
#endif
return modSum;
}
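// Sanity check: ten digest bytes each contribute at most 7 (mod 8), so the
// offset lies in [0, 70]. Combined with the maximum length of 82 above,
// offset + length <= 152 < MAGIC_ARRAY_SIZE (160), so reads from
// theMagicArray never run past the end of the array.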
#if SIMD_COEF_32
inline static void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
unsigned int i;
unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32];
unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32];
for (i = 0; i < 5; i++) {
*d = *s;
s += SIMD_COEF_32;
d += SIMD_COEF_32;
}
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#if SIMD_COEF_32
#if defined(_OPENMP)
int t;
#pragma omp parallel for
for (t = 0; t < omp_t; t++)
#define ti ((unsigned int)t*NBKEYS+(unsigned int)index)
#else
#define t 0
#define ti (unsigned int)index
#endif
{
unsigned int index, i, longest;
int len;
unsigned int crypt_len[NBKEYS];
longest = 0;
for (index = 0; index < NBKEYS; index++) {
// Store key into vector key buffer
if ((len = keyLen[ti]) < 0) {
uint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETSTARTPOS(ti)];
#if ARCH_ALLOWS_UNALIGNED
const uint32_t *wkey = (uint32_t*)saved_plain[ti];
#else
char buf_aligned[UTF8_PLAINTEXT_LENGTH + 1] JTR_ALIGN(4);
char *key = (char*)saved_plain[ti];
const uint32_t *wkey = is_aligned(key, 4) ?
(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
uint32_t temp;
len = 0;
while(((unsigned char)(temp = *wkey++))) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP(temp & 0xff);
len++;
break;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP(temp & 0xffff);
len+=2;
break;
}
*keybuf_word = JOHNSWAP(temp);
if (!(temp & 0xff000000))
{
len+=3;
break;
}
len += 4;
if (len & 63)
keybuf_word += SIMD_COEF_32;
else
keybuf_word = (uint32_t*)&saved_key[len>>6][GETSTARTPOS(ti)];
}
// Back-out of trailing spaces
while(len && saved_plain[ti][len - 1] == ' ')
saved_plain[ti][--len] = 0;
keyLen[ti] = len;
}
// 1. we need to SHA1 the password and username
for (i = 0; i < cur_salt->l; i++)
saved_key[(len+i)>>6][GETPOS((len + i), ti)] = cur_salt->s[i];
len += i;
saved_key[len>>6][GETPOS(len, ti)] = 0x80;
// Clean rest of this buffer
i = len;
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = 0;
for (; i < (((len+8)>>6)+1)*64; i += 4)
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;
// This should do good but Valgrind insists it's a waste
//if (clean_pos[ti] < i)
// clean_pos[ti] = len + 1;
if (len > longest)
longest = len;
((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
crypt_len[index] = len;
}
SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&crypt_key[t*20*NBKEYS], NULL, SSEi_MIXED_IN);
// Do another and possibly a third limb
memcpy(&interm_crypt[t*20*NBKEYS], &crypt_key[t*20*NBKEYS], 20*NBKEYS);
for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
// Copy any output that is done now
for (index = 0; index < NBKEYS; index++)
if (((crypt_len[index] + 8) >> 6) == i)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
}
longest = 0;
for (index = 0; index < NBKEYS; index++) {
unsigned int offsetMagicArray;
unsigned int lengthIntoMagicArray;
const unsigned char *p;
int i;
// If the final crypt ends up being 56-61 bytes (or so), this must be clean
for (i = 0; i < LIMB; i++)
if (keyLen[ti] < i * 64 + 55)
((unsigned int*)saved_key[i])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = 0;
len = keyLen[ti];
lengthIntoMagicArray = extractLengthOfMagicArray(crypt_key, ti);
offsetMagicArray = extractOffsetToMagicArray(crypt_key, ti);
// 2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
i = len - 1;
p = &theMagicArray[offsetMagicArray];
// Copy a char at a time until aligned (at destination)...
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = *p++;
// ...then a word at a time. This is a good boost, we are copying between 32 and 82 bytes here.
for (;i < lengthIntoMagicArray + len; i += 4, p += 4)
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = JOHNSWAP(*(uint32_t*)p);
// Now, the salt. This is typically too short for the stunt above.
for (i = 0; i < cur_salt->l; i++)
saved_key[(len+lengthIntoMagicArray+i)>>6][GETPOS((len + lengthIntoMagicArray + i), ti)] = cur_salt->s[i];
len += lengthIntoMagicArray + cur_salt->l;
saved_key[len>>6][GETPOS(len, ti)] = 0x80;
crypt_len[index] = len;
// Clean the rest of this buffer as needed
i = len;
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = 0;
for (; i < clean_pos[ti]; i += 4)
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;
clean_pos[ti] = len + 1;
if (len > longest)
longest = len;
((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
}
SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], NULL, SSEi_MIXED_IN);
// Typically, no or very few crypts are done at this point, so this is faster than memcpy'ing the lot
for (index = 0; index < NBKEYS; index++)
if (crypt_len[index] < 56)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
// Do another and possibly a third, fourth and fifth limb
for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
// Copy any output that is done now
for (index = 0; index < NBKEYS; index++)
if (((crypt_len[index] + 8) >> 6) == i)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
}
}
#undef t
#undef ti
#else
#ifdef _OPENMP
int index;
#pragma omp parallel for
for (index = 0; index < count; index++)
#else
#define index 0
#endif
{
unsigned int offsetMagicArray, lengthIntoMagicArray;
unsigned char temp_key[BINARY_SIZE];
unsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size...
SHA_CTX ctx;
if (keyLen[index] < 0) {
keyLen[index] = strlen((char*)saved_key[index]);
// Back-out of trailing spaces
while (saved_key[index][keyLen[index] - 1] == ' ') {
saved_key[index][--keyLen[index]] = 0;
if (keyLen[index] == 0) break;
}
}
//1. we need to SHA1 the password and username
memcpy(tempVar, saved_key[index], keyLen[index]); //first: the password
memcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username)
SHA1_Init(&ctx);
SHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l);
SHA1_Final((unsigned char*)temp_key, &ctx);
lengthIntoMagicArray = extractLengthOfMagicArray(temp_key);
offsetMagicArray = extractOffsetToMagicArray(temp_key);
//2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
memcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray);
memcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l);
SHA1_Init(&ctx);
SHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l);
SHA1_Final((unsigned char*)crypt_key[index], &ctx);
}
#undef index
#endif
return count;
}
static void *get_binary(char *ciphertext)
{
static int outbuf[BINARY_SIZE / sizeof(int)];
char *realcipher = (char*)outbuf;
int i;
char* newCiphertextPointer;
newCiphertextPointer = strrchr(ciphertext, '$') + 1;
for (i=0;i<BINARY_SIZE;i++)
{
realcipher[i] = atoi16[ARCH_INDEX(newCiphertextPointer[i*2])]*16 + atoi16[ARCH_INDEX(newCiphertextPointer[i*2+1])];
}
#ifdef SIMD_COEF_32
alter_endianity((unsigned char*)realcipher, BINARY_SIZE);
#endif
return (void*)realcipher;
}
#if 0 // Not possible with current interface
static char *source(struct db_password *pw, char Buf[LINE_BUFFER_SIZE] )
{
struct saltstruct *salt_s = (struct saltstruct*)(pw->source);
unsigned char realcipher[BINARY_SIZE];
unsigned char *cpi;
char *cpo;
int i;
memcpy(realcipher, pw->binary, BINARY_SIZE);
#ifdef SIMD_COEF_32
alter_endianity(realcipher, BINARY_SIZE);
#endif
memcpy(Buf, salt_s->s, salt_s->l);
cpo = &Buf[salt_s->l];
*cpo++ = '$';
cpi = realcipher;
for (i = 0; i < BINARY_SIZE; ++i) {
*cpo++ = itoa16u[(*cpi)>>4];
*cpo++ = itoa16u[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
#endif
#ifdef SIMD_COEF_32
#define KEY_OFF (((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*5+(index&(SIMD_COEF_32-1)))
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[KEY_OFF] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_6; }
#endif
// Here, we strip the salt's trailing space padding and cap its length at SALT_LENGTH
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
char *p;
int i;
p = strrchr(ciphertext, '$');
i = (int)(p - ciphertext) - 1;
while (ciphertext[i] == ' ' || i >= SALT_LENGTH)
i--;
i++;
memset(out, 0, sizeof(out));
memcpy(out, ciphertext, i);
strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i);
return out;
}
// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
struct saltstruct *s = (struct saltstruct*)salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < s->l; i++)
hash = ((hash << 5) + hash) ^ s->s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_sapG = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if !defined(SIMD_COEF_32) || defined(SIMD_PARA_SHA1)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_UTF8,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
empirical_mixing_time.h | /*
* Created on: Oct 17, 2017
* Author: Hjalmar Boulouednine <hjalmar@b-nine.de>
*
* This file is part of the marathon software.
*
* Copyright (c) 2016, Steffen Rechner
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MARATHON_EMPIRICAL_MIXING_TIME_H
#define MARATHON_EMPIRICAL_MIXING_TIME_H
#include <climits>
#include <memory>
#include <unordered_map>
#include <vector>
#include "marathon/state.h"
#include "marathon/mixing_time.h"
#include "statistics.h"
namespace marathon {
template<class T>
class EmpiricalMixingTimeCalculator {
protected:
const std::shared_ptr<MarkovChain> mc;
const std::function<T(const State &)> f;
public:
/**
* Create a calculator object.
* @param mc Markov chain object.
* @param f Function object evaluated to determine the empirical mixing time.
*/
EmpiricalMixingTimeCalculator(
std::shared_ptr<MarkovChain> mc,
std::function<T(const State &)> f
) : mc(std::move(mc)), f(std::move(f)) {
}
/**
* Computes the number of steps until the mean value of the given metric,
* calculated for N copies of the given chain, is close enough to the given mean
* value.
* @param eps The value used to decide whether the two means are close enough.
* 0.01 was suitable in most cases.
* @param mean The given "true" mean value.
* @param N The number of MarkovChain objects used to compute the mean value at
* each step.
* @return The number of steps until the computed mean is close to the given mean.
*/
int meanConvergenceTime(
double eps,
T mean,
int N) {
std::vector<std::unique_ptr<MarkovChain>> mc_population(N);
for (int i = 0; i < N; i++) {
mc_population[i] = mc->copy();
}
const int MAX_STEPS = 10000;
T first_val = 0;
int res = -1;
for (int i = 0; i < N; ++i)
first_val += f(mc_population[i]->getCurrentState());
first_val = first_val / N;
for (int step = 1; step < MAX_STEPS; ++step) {
T val = 0;
for (int i = 0; i < N; ++i)
val += f(mc_population[i]->randomize(1));
// compute the current mean
val = val / N;
// normalize the mean using the given "true" mean
val = (val - mean) / (first_val - mean);
if (val < eps) {
res = step;
break;
}
}
// cleanup is handled by the unique_ptr destructors
return res;
}
/**
* Computes the number of steps until the variation distance of the distribution
* of the given metric, calculated for N copies of the given chain, and the
* given distribution is smaller than eps.
* @param q_limit Limiting distribution of the target function f.
* @param N Number of samples used to approximate each sampling distribution.
* @param eps The value used to decide whether the two distributions are close enough.
* 0.01 was suitable in most cases.
* @param stepsize Number of steps after which the sampling histograms will be evaluated.
* @param bins Number of bins used when comparing the two histograms.
* @param verbose Enable verbose output.
* @return Empirical mixing time.
*/
int empiricalMixingTime(
const std::unordered_map<T, double> &q_limit,
const int N,
const double eps,
const int stepsize = 1,
const int bins = INT_MAX,
bool verbose = false
) {
// create a population of Markov chain objects
std::vector<std::unique_ptr<MarkovChain>> mc_population(N);
for (int i = 0; i < N; i++)
mc_population[i] = mc->copy();
// sampling distribution after t steps
std::unordered_map<T, double> q_t;
// auxiliary array to store evaluted function values
std::vector<T> tmp(N);
// number of steps (will be returned)
int t = 0;
// calculate initial variation distance
T s = f(mc->getCurrentState());
q_t[s] = 1.0;
double distance = variationDistance<T, double>(q_t, q_limit, bins);
/*std::cout << "distribution at step " << t << std::endl;
for(auto kv : q_t)
std::cout << kv.first << " " << kv.second << std::endl;
std::cout << t << "\t" << distance << std::endl;
std::cout << " -------------- " << std::endl;*/
// simulate Markov chains
while (distance > eps) {
#pragma omp parallel for
for (int i = 0; i < N; i++) {
// evolve Markov chain
const State &s = mc_population[i]->randomize(stepsize);
// evaluate target function
tmp[i] = f(s);
}
// construct sampling histogram
q_t.clear();
for (int i = 0; i < N; ++i)
q_t[tmp[i]]++;
for (auto &kv : q_t)
kv.second /= N;
/*std::cout << "distribution at step " << t << std::endl;
for(auto kv : q_t)
std::cout << kv.first << " " << kv.second << std::endl;
std::cout << t << "\t" << distance << std::endl;
std::cout << " -------------- " << std::endl;*/
if (verbose)
std::cout << t << "\t" << distance << std::endl;
t += stepsize;
distance = variationDistance<T, double>(q_limit, q_t, bins);
}
if (verbose)
std::cout << t << "\t" << distance << std::endl;
return t;
}
};
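/*
* Usage sketch (illustrative only; the concrete chain, the target function f
* and the limiting distribution q_limit below are assumptions, not part of
* marathon's shipped examples):
*
*   std::shared_ptr<MarkovChain> mc = ...;           // some concrete chain
*   auto f = [](const State &s) { return 0; };       // scalar summary of a state
*   EmpiricalMixingTimeCalculator<int> calc(mc, f);
*   std::unordered_map<int, double> q_limit = ...;   // known limit distribution of f
*   int t = calc.empiricalMixingTime(q_limit, 1000, 0.01);
*/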
}
#endif /* MARATHON_EMPIRICAL_MIXING_TIME_H */ |
fibonacci.c | /*
* Copyright 2020 Ricardo Ortiz, ricardo.ortiz@alumnos.ucn.cl
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/**
* Fibonacci
* @author Ricardo Ortiz-Hidalgo
*/
// Computes the n-th Fibonacci number recursively using OpenMP tasks
int fibonacci(int n) {
int i, j;
if (n<2)
return n;
else {
#pragma omp task shared(i)
i=fibonacci(n-1);
#pragma omp task shared(j)
j=fibonacci(n-2);
#pragma omp taskwait
return i+j;
}
}
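/*
* A common refinement (sketch, not part of the original program): stop
* spawning tasks below a cutoff so task-creation overhead does not dominate
* the exponential recursion. The cutoff value 20 here is an assumption.
*/
int fibonacci_cutoff(int n) {
int i, j;
if (n < 2)
return n;
if (n < 20) // serial below the cutoff
return fibonacci_cutoff(n - 1) + fibonacci_cutoff(n - 2);
#pragma omp task shared(i)
i = fibonacci_cutoff(n - 1);
#pragma omp task shared(j)
j = fibonacci_cutoff(n - 2);
#pragma omp taskwait
return i + j;
}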
// Forward declaration of the helper defined below
void factoredFibonacci(int numFibonacci);
// The main method
int main(int argc, char **argv){
if (argc < 2) {
fprintf(stderr, "usage: %s n\n", argv[0]);
return EXIT_FAILURE;
}
int n = atoi(argv[1]);
#pragma omp parallel
{
#pragma omp single
{
for (int i = 0; i <= n; i++)
{
int result = fibonacci(i);
printf("fibonacci(%d) = %d = ", i, result);
factoredFibonacci(result);
printf("\n");
}
}
}
return EXIT_SUCCESS;
}
// Prints the prime factorization of a Fibonacci number by trial division
void factoredFibonacci(int numFibonacci) {
int div = 2;
if (numFibonacci <= 5) {
printf("%d", numFibonacci);
} else {
while (numFibonacci != 1) {
if (numFibonacci % div == 0) {
printf("%d ", div);
numFibonacci = numFibonacci / div;
if (numFibonacci != 1)
printf("x ");
} else {
div++;
}
}
}
}
|
collision.c | /**
* @file collision.c
* @brief Collision search routine.
* @author Hanno Rein <hanno@hanno-rein.de>
*
* @details A collision is defined as an overlap between two particles. This
* is only an approximation and works only if the timestep is small
* enough. More precisely, dt << v / Rp, where v is the typical velocity
* and Rp the radius of a particle. Furthermore, particles must be
* approaching each other at the time when they overlap.
*
*
* @section LICENSE
* Copyright (c) 2011 Hanno Rein, Shangfei Liu
*
* This file is part of rebound.
*
* rebound is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* rebound is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with rebound. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include "particle.h"
#include "collision.h"
#include "rebound.h"
#include "boundary.h"
#include "tree.h"
#ifdef MPI
#include "communication_mpi.h"
#endif // MPI
#define MIN(a, b) ((a) > (b) ? (b) : (a)) ///< Returns the minimum of a and b
#define MAX(a, b) ((a) > (b) ? (a) : (b)) ///< Returns the maximum of a and b
static void reb_tree_get_nearest_neighbour_in_cell(struct reb_simulation* const r, int* collisions_N, struct reb_ghostbox gb, struct reb_ghostbox gbunmod, int ri, double p1_r, double* nearest_r2, struct reb_collision* collision_nearest, struct reb_treecell* c);
void reb_collision_search(struct reb_simulation* const r){
const int N = r->N;
int collisions_N = 0;
const struct reb_particle* const particles = r->particles;
switch (r->collision){
case REB_COLLISION_NONE:
break;
case REB_COLLISION_DIRECT:
{
// Loop over ghost boxes, but only the inner most ring.
int nghostxcol = (r->nghostx>1?1:r->nghostx);
int nghostycol = (r->nghosty>1?1:r->nghosty);
int nghostzcol = (r->nghostz>1?1:r->nghostz);
for (int gbx=-nghostxcol; gbx<=nghostxcol; gbx++){
for (int gby=-nghostycol; gby<=nghostycol; gby++){
for (int gbz=-nghostzcol; gbz<=nghostzcol; gbz++){
// Loop over all particles
for (int i=0;i<N;i++){
struct reb_particle p1 = particles[i];
struct reb_ghostbox gborig = reb_boundary_get_ghostbox(r, gbx,gby,gbz);
struct reb_ghostbox gb = gborig;
// Precalculate shifted position
gb.shiftx += p1.x;
gb.shifty += p1.y;
gb.shiftz += p1.z;
gb.shiftvx += p1.vx;
gb.shiftvy += p1.vy;
gb.shiftvz += p1.vz;
// Loop over all particles again
for (int j=0;j<N;j++){
// Do not collide particle with itself.
if (i==j) continue;
struct reb_particle p2 = particles[j];
double dx = gb.shiftx - p2.x;
double dy = gb.shifty - p2.y;
double dz = gb.shiftz - p2.z;
double sr = p1.r + p2.r;
double r2 = dx*dx+dy*dy+dz*dz;
// Check if particles are overlapping
if (r2>sr*sr) continue;
double dvx = gb.shiftvx - p2.vx;
double dvy = gb.shiftvy - p2.vy;
double dvz = gb.shiftvz - p2.vz;
// Check if particles are approaching each other
if (dvx*dx + dvy*dy + dvz*dz >0) continue;
// Add particles to collision array.
if (r->collisions_allocatedN<=collisions_N){
// Allocate memory if there is no space in array.
// Doing it in chunks of 32 to avoid having to do it too often.
r->collisions_allocatedN += 32;
r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN);
}
r->collisions[collisions_N].p1 = i;
r->collisions[collisions_N].p2 = j;
r->collisions[collisions_N].gb = gborig;
collisions_N++;
}
}
}
}
}
}
break;
case REB_COLLISION_MERCURIUS:
{
struct reb_ghostbox gborig = reb_boundary_get_ghostbox(r, 0,0,0);
if (r->ri_mercurius.mode==1){ // encounters
// Loop over all particles
for (int i=0;i<N;i++){
struct reb_particle p1 = particles[i];
// Loop over all particles again
for (int j=0;j<N;j++){
// Do not collide particle with itself.
if (i==j) continue;
struct reb_particle p2 = particles[j];
double dx = p1.x - p2.x;
double dy = p1.y - p2.y;
double dz = p1.z - p2.z;
double sr = p1.r + p2.r;
double r2 = dx*dx+dy*dy+dz*dz;
// Check if particles are overlapping
if (r2>sr*sr) continue;
double dvx = p1.vx - p2.vx;
double dvy = p1.vy - p2.vy;
double dvz = p1.vz - p2.vz;
// Check if particles are approaching each other
if (dvx*dx + dvy*dy + dvz*dz >0) continue;
// Add particles to collision array.
if (r->collisions_allocatedN<=collisions_N){
// Allocate memory if there is no space in array.
// Doing it in chunks of 32 to avoid having to do it too often.
r->collisions_allocatedN += 32;
r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN);
}
r->collisions[collisions_N].p1 = i;
r->collisions[collisions_N].p2 = j;
r->collisions[collisions_N].gb = gborig;
collisions_N++;
}
}
}else{ // Only checking for collisions with star
struct reb_particle p1 = particles[0];
double dt = r->dt;
// Loop over all particles again
for (int j=1;j<N;j++){
// Simplified version of MERCURIUS collision prediction
// It only uses the current positions and velocities.
// Can be simplified further.
struct reb_particle p2 = particles[j];
const double dxn = p1.x - p2.x;
const double dyn = p1.y - p2.y;
const double dzn = p1.z - p2.z;
const double dvxn = p1.vx - p2.vx;
const double dvyn = p1.vy - p2.vy;
const double dvzn = p1.vz - p2.vz;
const double rn = (dxn*dxn + dyn*dyn + dzn*dzn);
const double dxo = p1.x - p2.x - dt*dvxn;
const double dyo = p1.y - p2.y - dt*dvyn;
const double dzo = p1.z - p2.z - dt*dvzn;
const double dvxo = dvxn;
const double dvyo = dvyn;
const double dvzo = dvzn;
const double ro = (dxo*dxo + dyo*dyo + dzo*dzo);
const double drndt = (dxn*dvxn+dyn*dvyn+dzn*dvzn)*2.;
const double drodt = (dxo*dvxo+dyo*dvyo+dzo*dvzo)*2.;
const double a = 6.*(ro-rn)+3.*dt*(drodt+drndt);
const double b = 6.*(rn-ro)-2.*dt*(2.*drodt+drndt);
const double c = dt*drodt;
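// The step is rescaled to t in [0,1] and r^2(t) is modeled by the cubic
// Hermite interpolant matching (ro, drodt) at t=0 and (rn, drndt) at t=1.
// a*t^2 + b*t + c is the derivative of that cubic; its roots tmin1/tmin2
// (computed below) are the candidate interior minima of the squared separation.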
double rmin = MIN(rn,ro);
const double s = b*b-4.*a*c;
const double sr = sqrt(s);
const double tmin1 = (-b + sr)/(2.*a);
const double tmin2 = (-b - sr)/(2.*a);
if (tmin1>0. && tmin1<1.){
const double rmin1 = (1.-tmin1)*(1.-tmin1)*(1.+2.*tmin1)*ro
+ tmin1*tmin1*(3.-2.*tmin1)*rn
+ tmin1*(1.-tmin1)*(1.-tmin1)*dt*drodt
- tmin1*tmin1*(1.-tmin1)*dt*drndt;
rmin = MIN(MAX(rmin1,0.),rmin);
}
if (tmin2>0. && tmin2<1.){
const double rmin2 = (1.-tmin2)*(1.-tmin2)*(1.+2.*tmin2)*ro
+ tmin2*tmin2*(3.-2.*tmin2)*rn
+ tmin2*(1.-tmin2)*(1.-tmin2)*dt*drodt
- tmin2*tmin2*(1.-tmin2)*dt*drndt;
rmin = MIN(MAX(rmin2,0.),rmin);
}
const double spr = p1.r + p2.r;
// Check if particles are overlapping
if (rmin>spr*spr) continue;
// Add particles to collision array.
if (r->collisions_allocatedN<=collisions_N){
// Allocate memory if there is no space in array.
// Doing it in chunks of 32 to avoid having to do it too often.
r->collisions_allocatedN += 32;
r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN);
}
r->collisions[collisions_N].p1 = 0;
r->collisions[collisions_N].p2 = j;
r->collisions[collisions_N].gb = gborig;
collisions_N++;
r->ri_mercurius.recalculate_rhill_this_timestep = 1;
}
}
}
break;
case REB_COLLISION_TREE:
{
// Update and simplify tree.
// Prepare particles for distribution to other nodes.
reb_tree_update(r);
#ifdef MPI
// Distribute particles and add newly received particles to tree.
reb_communication_mpi_distribute_particles(r);
// Prepare essential tree (and particles close to the boundary needed for collisions) for distribution to other nodes.
reb_tree_prepare_essential_tree_for_collisions(r);
// Transfer essential tree and particles needed for collisions.
reb_communication_mpi_distribute_essential_tree_for_collisions(r);
#endif // MPI
// Loop over ghost boxes, but only the inner most ring.
int nghostxcol = (r->nghostx>1?1:r->nghostx);
int nghostycol = (r->nghosty>1?1:r->nghosty);
int nghostzcol = (r->nghostz>1?1:r->nghostz);
const struct reb_particle* const particles = r->particles;
const int N = r->N;
// Loop over all particles
#pragma omp parallel for schedule(guided)
for (int i=0;i<N;i++){
struct reb_particle p1 = particles[i];
struct reb_collision collision_nearest;
collision_nearest.p1 = i;
collision_nearest.p2 = -1;
double p1_r = p1.r;
double nearest_r2 = r->boxsize_max*r->boxsize_max/4.;
// Loop over ghost boxes.
for (int gbx=-nghostxcol; gbx<=nghostxcol; gbx++){
for (int gby=-nghostycol; gby<=nghostycol; gby++){
for (int gbz=-nghostzcol; gbz<=nghostzcol; gbz++){
// Precalculate shifted position (for speedup).
struct reb_ghostbox gb = reb_boundary_get_ghostbox(r, gbx,gby,gbz);
struct reb_ghostbox gbunmod = gb;
gb.shiftx += p1.x;
gb.shifty += p1.y;
gb.shiftz += p1.z;
gb.shiftvx += p1.vx;
gb.shiftvy += p1.vy;
gb.shiftvz += p1.vz;
// Loop over all root boxes.
for (int ri=0;ri<r->root_n;ri++){
struct reb_treecell* rootcell = r->tree_root[ri];
if (rootcell!=NULL){
reb_tree_get_nearest_neighbour_in_cell(r, &collisions_N, gb, gbunmod,ri,p1_r,&nearest_r2,&collision_nearest,rootcell);
}
}
}
}
}
// Continue if no collision was found
if (collision_nearest.p2==-1) continue;
}
}
break;
default:
reb_exit("Collision routine not implemented.");
}
// Randomize the order in which collisions get resolved
for (int i=0;i<collisions_N;i++){
int new = rand()%collisions_N;
struct reb_collision c1 = r->collisions[i];
r->collisions[i] = r->collisions[new];
r->collisions[new] = c1;
}
// Loop over all collisions previously found in reb_collision_search().
int (*resolve) (struct reb_simulation* const r, struct reb_collision c) = r->collision_resolve;
if (resolve==NULL){
// Default is hard sphere
resolve = reb_collision_resolve_hardsphere;
}
for (int i=0;i<collisions_N;i++){
struct reb_collision c = r->collisions[i];
if (c.p1 != -1 && c.p2 != -1){
// Resolve collision
int outcome = resolve(r, c);
// Remove particles
if (outcome & 1){
// Remove p1
if (c.p2==r->N-1 && !(r->tree_root)){
// Particles swapped
c.p2 = c.p1;
}
reb_remove(r,c.p1,r->collision_resolve_keep_sorted);
// Check for pair
for (int j=i+1;j<collisions_N;j++){
struct reb_collision cp = r->collisions[j];
if (cp.p1==c.p1 || cp.p2==c.p1){
r->collisions[j].p1 = -1;
r->collisions[j].p2 = -1;
// Will be skipped.
}
if (cp.p1==r->N){
r->collisions[j].p1 = c.p1;
}
if (cp.p2==r->N){
r->collisions[j].p2 = c.p1;
}
}
}
if (outcome & 2){
// Remove p2
reb_remove(r,c.p2,r->collision_resolve_keep_sorted);
// Check for pair
for (int j=i+1;j<collisions_N;j++){
struct reb_collision cp = r->collisions[j];
if (cp.p1==c.p2 || cp.p2==c.p2){
r->collisions[j].p1 = -1;
r->collisions[j].p2 = -1;
// Will be skipped.
}
if (cp.p1==r->N){
r->collisions[j].p1 = c.p2;
}
if (cp.p2==r->N){
r->collisions[j].p2 = c.p2;
}
}
}
}
}
}
/**
* @brief Workaround for python setters.
**/
void reb_set_collision_resolve(struct reb_simulation* r, int (*resolve) (struct reb_simulation* const r, struct reb_collision c)){
r->collision_resolve = resolve;
}
/**
* @brief Find the nearest neighbour in a cell or its daughters.
* @details The function only returns a positive result if the particles
* are overlapping. Thus, the name nearest neighbour is not
* exactly true.
* @param r REBOUND simulation to work on.
* @param gb (Shifted) position and velocity of the particle.
* @param ri Index of the root box currently being searched in.
* @param p1_r Radius of the particle (this is not in gb).
* @param nearest_r2 Pointer to the nearest neighbour found so far.
* @param collision_nearest Pointer to the nearest collision found so far.
* @param c Pointer to the cell currently being searched in.
* @param collisions_N Pointer to current number of collisions
* @param gbunmod Ghostbox unmodified
*/
static void reb_tree_get_nearest_neighbour_in_cell(struct reb_simulation* const r, int* collisions_N, struct reb_ghostbox gb, struct reb_ghostbox gbunmod, int ri, double p1_r, double* nearest_r2, struct reb_collision* collision_nearest, struct reb_treecell* c){
const struct reb_particle* const particles = r->particles;
if (c->pt>=0){
// c is a leaf node
int condition = 1;
#ifdef MPI
int isloc = 1 ;
isloc = reb_communication_mpi_rootbox_is_local(r, ri);
if (isloc==1){
#endif // MPI
/**
* If this is a local cell, make sure particle is not colliding with itself.
* If this is a remote cell, the particle number might be the same, even for
* different particles.
* TODO: This can probably be written in a cleaner way.
*/
condition = (c->pt != collision_nearest->p1);
#ifdef MPI
}
#endif // MPI
if (condition){
struct reb_particle p2;
#ifdef MPI
if (isloc==1){
#endif // MPI
p2 = particles[c->pt];
#ifdef MPI
}else{
int root_n_per_node = r->root_n/r->mpi_num;
int proc_id = ri/root_n_per_node;
p2 = r->particles_recv[proc_id][c->pt];
}
#endif // MPI
double dx = gb.shiftx - p2.x;
double dy = gb.shifty - p2.y;
double dz = gb.shiftz - p2.z;
double r2 = dx*dx+dy*dy+dz*dz;
// A closer neighbour has already been found
//if (r2 > *nearest_r2) return;
double rp = p1_r+p2.r;
// reb_particles are not overlapping
if (r2 > rp*rp) return;
double dvx = gb.shiftvx - p2.vx;
double dvy = gb.shiftvy - p2.vy;
double dvz = gb.shiftvz - p2.vz;
// reb_particles are not approaching each other
if (dvx*dx + dvy*dy + dvz*dz >0) return;
// Found a new nearest neighbour. Save it for later.
*nearest_r2 = r2;
collision_nearest->ri = ri;
collision_nearest->p2 = c->pt;
collision_nearest->gb = gbunmod;
// Save collision in collisions array.
#pragma omp critical
{
if (r->collisions_allocatedN<=(*collisions_N)){
r->collisions_allocatedN += 32;
r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN);
}
r->collisions[(*collisions_N)] = *collision_nearest;
(*collisions_N)++;
}
}
}else{
// c is not a leaf node
double dx = gb.shiftx - c->x;
double dy = gb.shifty - c->y;
double dz = gb.shiftz - c->z;
double r2 = dx*dx + dy*dy + dz*dz;
double rp = p1_r + r->max_radius[1] + 0.86602540378443*c->w;
// Check if we need to descend into daughter cells
if (r2 < rp*rp ){
for (int o=0;o<8;o++){
struct reb_treecell* d = c->oct[o];
if (d!=NULL){
reb_tree_get_nearest_neighbour_in_cell(r, collisions_N, gb,gbunmod,ri,p1_r,nearest_r2,collision_nearest,d);
}
}
}
}
}
int reb_collision_resolve_hardsphere(struct reb_simulation* const r, struct reb_collision c){
struct reb_particle* const particles = r->particles;
struct reb_particle p1 = particles[c.p1];
struct reb_particle p2;
#ifdef MPI
int isloc = reb_communication_mpi_rootbox_is_local(r, c.ri);
if (isloc==1){
#endif // MPI
p2 = particles[c.p2];
#ifdef MPI
}else{
int root_n_per_node = r->root_n/r->mpi_num;
int proc_id = c.ri/root_n_per_node;
p2 = r->particles_recv[proc_id][c.p2];
}
#endif // MPI
// if (p1.lastcollision==t || p2.lastcollision==t) return;
struct reb_ghostbox gb = c.gb;
double x21 = p1.x + gb.shiftx - p2.x;
double y21 = p1.y + gb.shifty - p2.y;
double z21 = p1.z + gb.shiftz - p2.z;
double rp = p1.r+p2.r;
double oldvyouter;
if (x21>0){
oldvyouter = p1.vy;
}else{
oldvyouter = p2.vy;
}
if (rp*rp < x21*x21 + y21*y21 + z21*z21) return 0;
double vx21 = p1.vx + gb.shiftvx - p2.vx;
double vy21 = p1.vy + gb.shiftvy - p2.vy;
double vz21 = p1.vz + gb.shiftvz - p2.vz;
if (vx21*x21 + vy21*y21 + vz21*z21 >0) return 0; // not approaching
// Bring the two balls into the xy plane.
// NOTE: this could probably be an atan (which is faster than atan2)
double theta = atan2(z21,y21);
double stheta = sin(theta);
double ctheta = cos(theta);
double vy21n = ctheta * vy21 + stheta * vz21;
double y21n = ctheta * y21 + stheta * z21;
// Bring the two balls onto the positive x axis.
double phi = atan2(y21n,x21);
double cphi = cos(phi);
double sphi = sin(phi);
double vx21nn = cphi * vx21 + sphi * vy21n;
// Coefficient of restitution
double eps= 1; // perfect bouncing by default
if (r->coefficient_of_restitution){
eps = r->coefficient_of_restitution(r, vx21nn);
}
double dvx2 = -(1.0+eps)*vx21nn;
double minr = (p1.r>p2.r)?p2.r:p1.r;
double maxr = (p1.r<p2.r)?p2.r:p1.r;
double mindv= minr*r->minimum_collision_velocity;
double _r = sqrt(x21*x21 + y21*y21 + z21*z21);
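// The factor below ramps the velocity floor with penetration depth: it is
// ~1 for full overlap (_r == maxr) and ~0 at first contact (_r == maxr + minr),
// so grazing collisions keep a gentle minimum bounce.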
mindv *= 1.-(_r - maxr)/minr;
if (mindv>maxr*r->minimum_collision_velocity)mindv = maxr*r->minimum_collision_velocity;
if (dvx2<mindv) dvx2 = mindv;
// Now we are rotating backwards
double dvx2n = cphi * dvx2;
double dvy2n = sphi * dvx2;
double dvy2nn = ctheta * dvy2n;
double dvz2nn = stheta * dvy2n;
// Applying the changes to the particles.
#ifdef MPI
if (isloc==1){
#endif // MPI
const double p2pf = p1.m/(p1.m+p2.m);
particles[c.p2].vx -= p2pf*dvx2n;
particles[c.p2].vy -= p2pf*dvy2nn;
particles[c.p2].vz -= p2pf*dvz2nn;
particles[c.p2].lastcollision = r->t;
#ifdef MPI
}
#endif // MPI
const double p1pf = p2.m/(p1.m+p2.m);
particles[c.p1].vx += p1pf*dvx2n;
particles[c.p1].vy += p1pf*dvy2nn;
particles[c.p1].vz += p1pf*dvz2nn;
particles[c.p1].lastcollision = r->t;
// Return y-momentum change
if (x21>0){
r->collisions_plog += -fabs(x21)*(oldvyouter-particles[c.p1].vy) * p1.m;
r->collisions_Nlog ++;
}else{
r->collisions_plog += -fabs(x21)*(oldvyouter-particles[c.p2].vy) * p2.m;
r->collisions_Nlog ++;
}
return 0;
}
int reb_collision_resolve_merge(struct reb_simulation* const r, struct reb_collision c){
if (r->particles[c.p1].lastcollision==r->t || r->particles[c.p2].lastcollision==r->t) return 0;
// Every collision will cause two callbacks (with p1/p2 interchanged).
// Always remove particle with larger index and merge into lower index particle.
// This will keep N_active meaningful even after mergers.
int swap = 0;
int i = c.p1;
int j = c.p2; //want j to be removed particle
if (j<i){
swap = 1;
i = c.p2;
j = c.p1;
}
struct reb_particle* pi = &(r->particles[i]);
struct reb_particle* pj = &(r->particles[j]);
double invmass = 1.0/(pi->m + pj->m);
//Scale out energy from collision - initial energy
double Ei=0, Ef=0;
if(r->track_energy_offset){
{
double vx = pi->vx;
double vy = pi->vy;
double vz = pi->vz;
Ei += 0.5*pi->m*(vx*vx + vy*vy + vz*vz);
}
{
double vx = pj->vx;
double vy = pj->vy;
double vz = pj->vz;
Ei += 0.5*pj->m*(vx*vx + vy*vy + vz*vz);
}
{
double x = pi->x - pj->x;
double y = pi->y - pj->y;
double z = pi->z - pj->z;
double _r = sqrt(x*x + y*y + z*z);
Ei += - r->G*pi->m*pj->m/_r;
}
}
// Merge by conserving mass, volume and momentum
pi->vx = (pi->vx*pi->m + pj->vx*pj->m)*invmass;
pi->vy = (pi->vy*pi->m + pj->vy*pj->m)*invmass;
pi->vz = (pi->vz*pi->m + pj->vz*pj->m)*invmass;
pi->x = (pi->x*pi->m + pj->x*pj->m)*invmass;
pi->y = (pi->y*pi->m + pj->y*pj->m)*invmass;
pi->z = (pi->z*pi->m + pj->z*pj->m)*invmass;
pi->m = pi->m + pj->m;
pi->r = pow(pow(pi->r,3.)+pow(pj->r,3.),1./3.);
pi->lastcollision = r->t;
// Keeping track of energy offset
if(r->track_energy_offset){
{
double vx = pi->vx;
double vy = pi->vy;
double vz = pi->vz;
Ef += 0.5*pi->m*(vx*vx + vy*vy + vz*vz);
}
r->energy_offset += Ei - Ef;
}
// If hermes calculate energy offset in global - hasn't been removed from global yet
if (r->ri_hermes.global){
if(r->ri_hermes.global->ri_hermes.mini_active){
r->ri_hermes.global->ri_hermes.collision_this_global_dt = 1;
}
}
return swap?1:2; // Remove particle p2 from simulation
}
|
GB_binop__islt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint64)
// A*D function (colscale): GB (_AxD__islt_uint64)
// D*A function (rowscale): GB (_DxB__islt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint64)
// C=scalar+B GB (_bind1st__islt_uint64)
// C=scalar+B' GB (_bind1st_tran__islt_uint64)
// C=A+scalar GB (_bind2nd__islt_uint64)
// C=A'+scalar GB (_bind2nd_tran__islt_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij < bij)
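// (ISLT is GraphBLAS's "is less than": unlike LT, whose result is bool,
// it returns 0 or 1 in the operand type, here uint64_t.)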
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT64 || GxB_NO_ISLT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__islt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
deconvolution_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void deconvolution_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
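// Index mapping used below (worked example): output pixel j receives a
// contribution from input pixel sx through kernel tap x iff
//   j + x*dilation_w - (kernel_extent_w - 1) == sx*stride_w.
// E.g. kernel_w=3, stride_w=2, dilation_w=1 gives kernel_extent_w=3; for
// output j=5, tap x=1 yields sxs = 5+1-2 = 4, so input sx=2 contributes,
// while tap x=0 yields sxs = 3, not divisible by the stride, and is skipped.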
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
v4f32 _sum = (v4f32)__msa_fill_w(0);
if (bias_data_ptr)
{
_sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0);
}
const float* kptr = (const float*)weight_data_pack4.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = (y * kernel_w + x) * 16;
v4f32 _val0 = (v4f32)__msa_fill_w_f32(*sptr++);
v4f32 _val1 = (v4f32)__msa_fill_w_f32(*sptr++);
v4f32 _val2 = (v4f32)__msa_fill_w_f32(*sptr++);
v4f32 _val3 = (v4f32)__msa_fill_w_f32(*sptr++);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr + k, 0);
v4f32 _w1 = (v4f32)__msa_ld_w(kptr + k + 4, 0);
v4f32 _w2 = (v4f32)__msa_ld_w(kptr + k + 8, 0);
v4f32 _w3 = (v4f32)__msa_ld_w(kptr + k + 12, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
_sum = __msa_fmadd_w(_sum, _val1, _w1);
_sum = __msa_fmadd_w(_sum, _val2, _w2);
_sum = __msa_fmadd_w(_sum, _val3, _w3);
}
}
kptr += maxk * 16;
}
_sum = activation_ps(_sum, activation_type, activation_params);
__msa_st_w((v4i32)_sum, outptr + j * 4, 0);
}
outptr += outw * 4;
}
}
}
|
owl_matrix_swap_impl_omp.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef OWL_ENABLE_TEMPLATE
// swap row i and row j in x(m,n)
void FUNCTION (c, swap_rows) (TYPE *x, int m, int n, int i, int j) {
if (i != j) {
TYPE * src = x + n * i;
TYPE * dst = x + n * j;
if (n >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int k = 0; k < n; k++) {
TYPE t = *(src + k);
*(src + k) = *(dst + k);
*(dst + k) = t;
}
}
else {
for (int k = 0; k < n; k++) {
TYPE t = *(src + k);
*(src + k) = *(dst + k);
*(dst + k) = t;
}
}
}
}
// stub function of swap_rows
CAMLprim value FUNCTION (stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
int m = Long_val(vM);
int n = Long_val(vN);
int i = Long_val(vI);
int j = Long_val(vJ);
FUNCTION (c, swap_rows) (X_data, m, n, i, j);
return Val_unit;
}
// swap column i and column j in x(m,n)
void FUNCTION (c, swap_cols) (TYPE *x, int m, int n, int i, int j) {
if (i != j) {
TYPE * src = x + i;
TYPE * dst = x + j;
if (m >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int k = 0; k < m; k++) {
int base = k * n;
TYPE t = *(src + base);
*(src + base) = *(dst + base);
*(dst + base) = t;
}
}
else {
int base = 0;
for (int k = 0; k < m; k++) {
TYPE t = *(src + base);
*(src + base) = *(dst + base);
*(dst + base) = t;
base += n;
}
}
}
}
// stub function of swap_cols
CAMLprim value FUNCTION (stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
int m = Long_val(vM);
int n = Long_val(vN);
int i = Long_val(vI);
int j = Long_val(vJ);
FUNCTION (c, swap_cols) (X_data, m, n, i, j);
return Val_unit;
}
// transpose x(m,n) and save to y(n,m)
void FUNCTION (c, transpose) (TYPE *x, TYPE *y, int m, int n) {
int ofsx = 0;
int ofsy = 0;
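// The parallel branch below computes indices directly from (i, j): the
// running offsets ofsx/ofsy used in the serial branch carry a loop
// dependence and cannot be shared across OpenMP iterations.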
if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
*(y + i + j * m) = *(x + j + i * n);
}
}
}
else {
for (int i = 0; i < m; i++) {
ofsy = i;
for (int j = 0; j < n; j++) {
*(y + ofsy) = *(x + ofsx);
ofsy += m;
ofsx += 1;
}
}
}
}
// stub function of transpose
CAMLprim value FUNCTION (stub, transpose) (value vX, value vY) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
TYPE *Y_data = (TYPE *) Y->data;
FUNCTION (c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]);
return Val_unit;
}
// conjugate transpose x(m,n) and save to y(n,m)
void FUNCTION (c, ctranspose) (TYPE *x, TYPE *y, int m, int n) {
int ofsx = 0;
int ofsy = 0;
if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
*(y + i + j * m) = CONJ_FUN(*(x + j + i * n));
}
}
}
else {
for (int i = 0; i < m; i++) {
ofsy = i;
for (int j = 0; j < n; j++) {
*(y + ofsy) = CONJ_FUN(*(x + ofsx));
ofsy += m;
ofsx += 1;
}
}
}
}
// stub function of ctranspose
CAMLprim value FUNCTION (stub, ctranspose) (value vX, value vY) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
TYPE *Y_data = (TYPE *) Y->data;
FUNCTION (c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]);
return Val_unit;
}
#endif /* OWL_ENABLE_TEMPLATE */
|
GB_unaryop__abs_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int16
// op(A') function: GB_tran__abs_bool_int16
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_bool_int16
(
bool *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_bool_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ellke.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *ellke(PyObject *self, PyObject *args);
static PyObject *ellke(PyObject *self, PyObject *args)
{
PyArrayObject *k, *ek, *kk;
int i;
double a0,a1,a2,a3,a4,b0,b1,b2,b3,b4,m1;
npy_intp dims[1];
if(!PyArg_ParseTuple(args,"O", &k))
{
return NULL;
}
a1=0.44325141463;
a2=0.06260601220;
a3=0.04757383546;
a4=0.01736506451;
b1=0.24998368310;
b2=0.09200180037;
b3=0.04069697526;
b4=0.00526449639;
dims[0] = k->dimensions[0];
ek = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
#pragma omp parallel for private(m1)
for(i=0;i<dims[0];i++)
{
m1 = 1.-pow(IND(k,i),2);
IND(ek,i) = 1.+m1*(a1+m1*(a2+m1*(a3+m1*a4))) - m1*(b1+m1*(b2+m1*(b3+m1*b4)))*log(m1);
}
a0=1.38629436112;
a1=0.09666344259;
a2=0.03590092383;
a3=0.03742563713;
a4=0.01451196212;
b0=0.5;
b1=0.12498593597;
b2=0.06880248576;
b3=0.03328355346;
b4=0.00441787012;
kk = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
#pragma omp parallel for private(m1)
for(i=0;i<dims[0];i++)
{
m1 = 1.-pow(IND(k,i),2);
IND(kk,i) = a0+m1*(a1+m1*(a2+m1*(a3+m1*a4))) - (b0+m1*(b1+m1*(b2+m1*(b3+m1*b4))))*log(m1);
}
return Py_BuildValue("NN",ek,kk);
}
static char module_docstring[]="\
Computes Hastings' polynomial approximation for the complete\n\
elliptic integrals of the second (ek) and first (kk) kind.\n\
\n\
Parameters\n\
----------\n\
k: 1D NPY ARRAY - contains values from trquad.py\n\
\n\
Returns\n\
-------\n\
ek: 1D NPY ARRAY - complete elliptic integral of the second kind E(k)\n\
kk: 1D NPY ARRAY - complete elliptic integral of the first kind K(k)\n\
\n\
Revisions\n\
---------\n\
Original version by Jason Eastman\n\
2012-08-25 Kevin Stevenson, UChicago \n\
kbs@uchicago.edu\n\
Converted from Python\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\n\
";
static PyMethodDef module_methods[] = {
{"ellke", ellke,METH_VARARGS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_ellke(void)
#else
initellke(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"ellke", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("ellke", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
GB_unop__identity_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_uint16)
// op(A') function: GB (_unop_tran__identity_int8_uint16)
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_uint16)
(
int8_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_uint8
// op(A') function: GB_tran__ainv_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp32_uint8
(
float *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
maxwell_semi_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
* OpenMP Problems
*
* Need to fix the way these variables are set and incremented in loops:
* nElements, nElements_iedges, nFaces, nFaces_iedges, nEdges, nEdges_iedges,
* nElements_Faces, nElements_Edges,
* j, l, k (these three only where they are listed at the end of SMP_PRIVATE)
*
******************************************************************************/
#include "_hypre_sstruct_ls.h"
/*--------------------------------------------------------------------------
* hypre_Maxwell_Interp.c
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CreatePTopology(void **PTopology_vdata_ptr)
{
hypre_PTopology *PTopology;
HYPRE_Int ierr= 0;
PTopology= hypre_CTAlloc(hypre_PTopology, 1);
(PTopology -> Face_iedge) = NULL;
(PTopology -> Element_iedge)= NULL;
(PTopology -> Edge_iedge) = NULL;
(PTopology -> Element_Face) = NULL;
(PTopology -> Element_Edge) = NULL;
*PTopology_vdata_ptr= (void *) PTopology;
return ierr;
}
HYPRE_Int
hypre_DestroyPTopology(void *PTopology_vdata)
{
hypre_PTopology *PTopology= (hypre_PTopology *)PTopology_vdata;
HYPRE_Int ierr = 0;
if (PTopology)
{
if ( (PTopology -> Face_iedge) != NULL)
{
HYPRE_IJMatrixDestroy(PTopology -> Face_iedge);
}
HYPRE_IJMatrixDestroy(PTopology -> Element_iedge);
HYPRE_IJMatrixDestroy(PTopology -> Edge_iedge);
if ( (PTopology -> Element_Face) != NULL)
{
HYPRE_IJMatrixDestroy(PTopology -> Element_Face);
}
HYPRE_IJMatrixDestroy(PTopology -> Element_Edge);
}
hypre_TFree(PTopology);
return ierr;
}
hypre_IJMatrix *
hypre_Maxwell_PTopology( hypre_SStructGrid *fgrid_edge,
hypre_SStructGrid *cgrid_edge,
hypre_SStructGrid *fgrid_face,
hypre_SStructGrid *cgrid_face,
hypre_SStructGrid *fgrid_element,
hypre_SStructGrid *cgrid_element,
hypre_ParCSRMatrix *Aee,
hypre_Index rfactor,
void *PTopology_vdata)
{
MPI_Comm comm = (fgrid_element -> comm);
hypre_PTopology *PTopology= (hypre_PTopology *)PTopology_vdata;
hypre_IJMatrix *Face_iedge;
hypre_IJMatrix *Element_iedge;
hypre_IJMatrix *Edge_iedge;
hypre_IJMatrix *Element_Face;
hypre_IJMatrix *Element_Edge;
hypre_IJMatrix *edge_Edge;
hypre_SStructPGrid *p_cgrid, *p_fgrid;
hypre_StructGrid *var_cgrid, *var_fgrid;
hypre_BoxArray *cboxes, *fboxes, *box_array;
hypre_Box *cbox, *fbox, *cellbox, *vbox, copy_box;
hypre_BoxArray **contract_fedgeBoxes;
hypre_Index **Edge_cstarts, **upper_shifts, **lower_shifts;
HYPRE_Int **cfbox_mapping, **fcbox_mapping;
hypre_BoxManEntry *entry;
HYPRE_Int rank, rank2;
HYPRE_Int start_rank1;
HYPRE_Int nFaces, nEdges, nElements, nedges;
HYPRE_Int nxFaces, nyFaces, nzFaces, nxEdges, nyEdges, nzEdges;
HYPRE_Int n_xFace_iedges, n_yFace_iedges, n_zFace_iedges;
HYPRE_Int n_Cell_iedges;
HYPRE_Int nElements_iedges, nFaces_iedges, nEdges_iedges;
HYPRE_Int nElements_Faces, nElements_Edges;
HYPRE_Int *iFace, *iEdge, *iElement, *iedgeEdge;
HYPRE_Int *jFace_edge, *jEdge_iedge, *jElement_edge;
HYPRE_Int *jElement_Face, *jElement_Edge, *jedge_Edge;
HYPRE_Real *vals_ElementEdge, *vals_ElementFace, *vals_edgeEdge, *vals_Faceedge;
HYPRE_Real *vals_Elementedge, *vals_Edgeiedge;
HYPRE_Int *ncols_Elementedge, *ncols_Edgeiedge, *ncols_edgeEdge, *ncols_Faceedge;
HYPRE_Int *ncols_ElementFace, *ncols_ElementEdge;
HYPRE_Int *bdryedge_location;
HYPRE_Real fCedge_ratio;
HYPRE_Real *stencil_vals, *upper, *lower, *diag, *face_w1, *face_w2;
HYPRE_Int *off_proc_flag;
hypre_Index cindex;
hypre_Index findex;
hypre_Index var_index, cell_index, *boxoffset, *suboffset;
hypre_Index loop_size, start, cstart, stride, low_index, hi_index;
hypre_Index ishift, jshift, kshift, zero_index, one_index;
hypre_Index lindex;
HYPRE_Int n_boxoffsets;
HYPRE_Int nparts= hypre_SStructGridNParts(fgrid_element);
HYPRE_Int ndim = hypre_SStructGridNDim(fgrid_element);
HYPRE_SStructVariable *vartypes, *Edge_vartypes, *Face_vartypes;
hypre_Index *varoffsets;
HYPRE_Int *vartype_map;
HYPRE_Int matrix_type= HYPRE_PARCSR;
HYPRE_Int nvars, Face_nvars, Edge_nvars, part, var, box, fboxi;
HYPRE_Int tot_vars= 8;
HYPRE_Int t, i, j, k, l, m, n, p, r;
HYPRE_Int ilower, iupper;
HYPRE_Int jlower, jupper;
HYPRE_Int **flower_ranks, **fupper_ranks;
HYPRE_Int **clower_ranks, **cupper_ranks;
HYPRE_Int ***n_CtoVbox, ****CtoVboxnums;
HYPRE_Int *num_vboxes, **vboxnums;
HYPRE_Int size1;
HYPRE_Int trueV = 1;
HYPRE_Int falseV= 0;
HYPRE_Int row_in;
HYPRE_Int myproc;
hypre_BoxInit(&copy_box, ndim);
hypre_MPI_Comm_rank(comm, &myproc);
hypre_SetIndex3(ishift, 1, 0, 0);
hypre_SetIndex3(jshift, 0, 1, 0);
hypre_SetIndex3(kshift, 0, 0, 1);
hypre_ClearIndex(zero_index);
hypre_ClearIndex(one_index);
for (i= 0; i< ndim; i++)
{
one_index[i]= 1;
}
/* set rfactor[2]= 1 if ndim=2. */
if (ndim == 2)
{
rfactor[2]= 1;
}
/*-------------------------------------------------------------------
* Find the coarse-fine connection pattern, i.e., the topology
* needed to create the interpolation operators.
* Face_iedge, Edge_iedge, Element_iedge, Element_Face, Element_Edge,
* and edge_Edge connections are defined in terms of parcsr_matrices.
* These connections are determined using the cell-centred grids.
* Note that we are assuming the variable type enumeration
* given in hypre_SStructVariable_enum.
*
* We consider both 2-d and 3-d cases. In 2-d, the edges are faces.
* We will continue to call them edges, but use the face variable
* enumeration.
*-------------------------------------------------------------------*/
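/* For reference, the hypre_SStructVariable enumeration assumed here is:
CELL= 0, NODE= 1, XFACE= 2, YFACE= 3, ZFACE= 4, XEDGE= 5, YEDGE= 6,
ZEDGE= 7. The integer cases in the switches below follow this ordering. */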
varoffsets= hypre_CTAlloc(hypre_Index, tot_vars);
/* A total of 8 variable types. Create a mapping from the user enumeration
to the hypre enumeration. Only needed for the face and edge grids. */
vartype_map= hypre_CTAlloc(HYPRE_Int, 8);
part= 0;
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part); /* face cgrid */
nvars = hypre_SStructPGridNVars(p_cgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (i= 0; i< nvars; i++)
{
t= vartypes[i];
switch(t)
{
case 2:
{
vartype_map[2]= i;
break;
}
case 3:
{
vartype_map[3]= i;
break;
}
case 4:
{
vartype_map[4]= i;
break;
}
}
}
if (ndim == 3)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); /* edge cgrid */
nvars = hypre_SStructPGridNVars(p_cgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (i= 0; i< nvars; i++)
{
t= vartypes[i];
switch(t)
{
case 5:
{
vartype_map[5]= i;
break;
}
case 6:
{
vartype_map[6]= i;
break;
}
case 7:
{
vartype_map[7]= i;
break;
}
}
}
}
/* local sizes */
nFaces = 0;
nEdges = 0;
nElements= 0;
nedges = 0;
nxFaces = 0;
nyFaces = 0;
nzFaces = 0;
nxEdges = 0;
nyEdges = 0;
nzEdges = 0;
for (part= 0; part< nparts; part++)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part); /* cell cgrid */
var_cgrid = hypre_SStructPGridCellSGrid(p_cgrid) ;
nElements+= hypre_StructGridLocalSize(var_cgrid);
t= 0;
hypre_SStructVariableGetOffset((hypre_SStructVariable) t,
ndim, varoffsets[0]);
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part); /* face cgrid */
nvars = hypre_SStructPGridNVars(p_cgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (var= 0; var< nvars; var++)
{
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, var);
t= vartypes[var];
nFaces+= hypre_StructGridLocalSize(var_cgrid);
switch(t)
{
case 2:
nxFaces+= hypre_StructGridLocalSize(var_cgrid);
break;
case 3:
nyFaces+= hypre_StructGridLocalSize(var_cgrid);
break;
case 4:
nzFaces+= hypre_StructGridLocalSize(var_cgrid);
break;
}
hypre_SStructVariableGetOffset((hypre_SStructVariable) t,
ndim, varoffsets[t]);
}
/* 2-d vs 3-d case */
if (ndim < 3)
{
nEdges = nFaces;
nxEdges= nxFaces;
nyEdges= nyFaces;
nzEdges= 0;
}
else
{
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); /* edge cgrid */
nvars = hypre_SStructPGridNVars(p_cgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (var= 0; var< nvars; var++)
{
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, var);
t= vartypes[var];
nEdges+= hypre_StructGridLocalSize(var_cgrid);
switch(t)
{
case 5:
nxEdges+= hypre_StructGridLocalSize(var_cgrid);
break;
case 6:
nyEdges+= hypre_StructGridLocalSize(var_cgrid);
break;
case 7:
nzEdges+= hypre_StructGridLocalSize(var_cgrid);
break;
}
hypre_SStructVariableGetOffset((hypre_SStructVariable) t,
ndim, varoffsets[t]);
}
}
/* same for 2-d & 3-d, assuming that fgrid_edge= fgrid_face in input */
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part); /* edge fgrid */
nvars = hypre_SStructPGridNVars(p_fgrid);
vartypes= hypre_SStructPGridVarTypes(p_fgrid);
for (var= 0; var< nvars; var++)
{
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, var);
nedges += hypre_StructGridLocalSize(var_fgrid);
}
}
/*--------------------------------------------------------------------------
* Form mappings between the c & f box numbers. Note that a cbox
* can land inside only one fbox since the latter was contracted. Without
* the contraction, a cbox can land in more than one fbox (e.g., a cbox
* boundary extending into other fboxes). These mappings are for the
* cell-centred boxes.
* Check: Other variable boxes should follow this mapping, by
* property of the variable-shifted indices? Can the cell-centred boundary
* indices of a box be non-cell-centred indices for another box?
*
* Also determine contracted cell-centred fboxes.
*--------------------------------------------------------------------------*/
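/* Sketch of the mapping (assuming rfactor= {2,2,2}): a cell-centred fbox
with extents [0,7]^3 projects onto the rfactor lattice and coarsens, via
hypre_StructMapFineToCoarse, to the cbox [0,3]^3; each surviving cbox k
is then paired with exactly one fbox j in the loop below. */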
cfbox_mapping= hypre_TAlloc(HYPRE_Int *, nparts);
fcbox_mapping= hypre_TAlloc(HYPRE_Int *, nparts);
contract_fedgeBoxes= hypre_TAlloc(hypre_BoxArray *, nparts);
Edge_cstarts= hypre_TAlloc(hypre_Index *, nparts);
upper_shifts= hypre_TAlloc(hypre_Index *, nparts);
lower_shifts= hypre_TAlloc(hypre_Index *, nparts);
for (i= 0; i< nparts; i++)
{
p_fgrid = hypre_SStructGridPGrid(fgrid_element, i);
var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
j = hypre_BoxArraySize(fboxes);
fcbox_mapping[i]= hypre_CTAlloc(HYPRE_Int, j);
p_cgrid = hypre_SStructGridPGrid(cgrid_element, i);
var_cgrid= hypre_SStructPGridCellSGrid(p_cgrid);
cboxes = hypre_StructGridBoxes(var_cgrid);
j = hypre_BoxArraySize(fboxes);
cfbox_mapping[i]= hypre_CTAlloc(HYPRE_Int, j);
/* assuming if i1 > i2 and (box j1) is coarsened from (box i1)
and (box j2) from (box i2), then j1 > j2. */
k= 0;
hypre_ForBoxI(j, fboxes)
{
fbox= hypre_BoxArrayBox(fboxes, j);
hypre_CopyBox(fbox, &copy_box);
hypre_ProjectBox(&copy_box, zero_index, rfactor);
hypre_StructMapFineToCoarse(hypre_BoxIMin(&copy_box), zero_index,
rfactor, hypre_BoxIMin(&copy_box));
hypre_StructMapFineToCoarse(hypre_BoxIMax(&copy_box), zero_index,
rfactor, hypre_BoxIMax(&copy_box));
/* since the ordering of the cboxes was determined by the fbox
ordering, we only have to check if the first cbox in the
list intersects with copy_box. If not, this fbox vanished in the
coarsening. Note that this gives you the correct interior cbox. */
cbox= hypre_BoxArrayBox(cboxes, k);
hypre_IntersectBoxes(&copy_box, cbox, &copy_box);
if (hypre_BoxVolume(&copy_box))
{
cfbox_mapping[i][k]= j;
fcbox_mapping[i][j]= k;
k++;
} /* if (hypre_BoxVolume(&copy_box)) */
} /* hypre_ForBoxI(j, fboxes) */
/* fill up the contracted box_array */
contract_fedgeBoxes[i]= hypre_BoxArrayCreate(0, ndim);
Edge_cstarts[i]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes));
upper_shifts[i]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes));
lower_shifts[i]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes));
hypre_ForBoxI(j, fboxes)
{
fbox= hypre_BoxArrayBox(fboxes, j);
/* contract the fbox to correspond to the correct cbox */
cbox= hypre_BoxContraction(fbox, var_fgrid, rfactor);
hypre_AppendBox(cbox, contract_fedgeBoxes[i]);
/* record the offset mapping between the coarse cell index and
the fine cell index */
hypre_ClearIndex(upper_shifts[i][j]);
hypre_ClearIndex(lower_shifts[i][j]);
for (l= 0; l< ndim; l++)
{
m= hypre_BoxIMin(cbox)[l];
p= m%rfactor[l];
if (p > 0 && m > 0)
{
upper_shifts[i][j][l]= p-1;
lower_shifts[i][j][l]= p-rfactor[l];
}
else
{
upper_shifts[i][j][l]= rfactor[l]-p-1;
lower_shifts[i][j][l]=-p;
}
}
/* record the cstarts of the cbox */
hypre_ProjectBox(cbox, zero_index, rfactor);
hypre_CopyIndex(hypre_BoxIMin(cbox), Edge_cstarts[i][j]);
hypre_StructMapFineToCoarse(Edge_cstarts[i][j], zero_index, rfactor,
Edge_cstarts[i][j]);
hypre_BoxDestroy(cbox);
}
} /* for (i= 0; i< nparts; i++) */
/* variable rank bounds for this processor */
n_CtoVbox = hypre_TAlloc(HYPRE_Int **, nparts);
CtoVboxnums = hypre_TAlloc(HYPRE_Int ***, nparts);
for (part= 0; part< nparts; part++)
{
hypre_SStructCellGridBoxNumMap(fgrid_edge, part, &n_CtoVbox[part],
&CtoVboxnums[part]);
}
/* variable rank bounds for this processor */
flower_ranks= hypre_TAlloc(HYPRE_Int *, nparts);
fupper_ranks= hypre_TAlloc(HYPRE_Int *, nparts);
clower_ranks= hypre_TAlloc(HYPRE_Int *, nparts);
cupper_ranks= hypre_TAlloc(HYPRE_Int *, nparts);
for (part= 0; part< nparts; part++)
{
flower_ranks[part]= hypre_CTAlloc(HYPRE_Int, tot_vars);
fupper_ranks[part]= hypre_CTAlloc(HYPRE_Int, tot_vars);
/* cell grid ranks */
p_fgrid= hypre_SStructGridPGrid(fgrid_element, part);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, 0);
box_array= hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_element, part, findex, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &flower_ranks[part][0],
matrix_type);
fbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &fupper_ranks[part][0],
matrix_type);
clower_ranks[part]= hypre_CTAlloc(HYPRE_Int, tot_vars);
cupper_ranks[part]= hypre_CTAlloc(HYPRE_Int, tot_vars);
p_cgrid= hypre_SStructGridPGrid(cgrid_element, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
box_array= hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_element, part, cindex, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &clower_ranks[part][0],
matrix_type);
cbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &cupper_ranks[part][0],
matrix_type);
/* face grid ranks */
p_fgrid= hypre_SStructGridPGrid(fgrid_face, part);
p_cgrid= hypre_SStructGridPGrid(cgrid_face, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (i= 0; i< nvars; i++)
{
t= vartypes[i];
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, i);
box_array= hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_face, part, findex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &flower_ranks[part][t],
matrix_type);
fbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_face, part, findex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &fupper_ranks[part][t],
matrix_type);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, i);
box_array= hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &clower_ranks[part][t],
matrix_type);
cbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &cupper_ranks[part][t],
matrix_type);
}
/* edge grid ranks */
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part);
p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (i= 0; i< nvars; i++)
{
t= vartypes[i];
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, i);
box_array= hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &flower_ranks[part][t],
matrix_type);
fbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(fbox), findex);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &fupper_ranks[part][t],
matrix_type);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, i);
box_array= hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(box_array, 0);
hypre_CopyIndex(hypre_BoxIMin(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &clower_ranks[part][t],
matrix_type);
cbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);
hypre_CopyIndex(hypre_BoxIMax(cbox), cindex);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, i,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &cupper_ranks[part][t],
matrix_type);
}
}
/* CREATE IJ_MATRICES- need to find the size of each one. Notice that the row
and col ranks of these matrices can be created using only grid information.
Grab the first part, first variable, first box, and lower index (lower rank);
Grab the last part, last variable, last box, and upper index (upper rank). */
/* Element_iedge- same for 2-d and 3-d */
/* lower rank */
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
var_cgrid= hypre_SStructPGridCellSGrid(p_cgrid) ;
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &ilower);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, 0, box, myproc, &entry);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, 0);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(fbox), &jlower);
/* upper rank */
part= nparts-1;
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
var_cgrid= hypre_SStructPGridCellSGrid(p_cgrid) ;
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, 0, hypre_BoxArraySize(cboxes)-1,
myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &iupper);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, nvars-1);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, hypre_BoxArraySize(fboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, nvars-1, hypre_BoxArraySize(fboxes)-1,
myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(fbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Element_iedge);
HYPRE_IJMatrixSetObjectType(Element_iedge, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(Element_iedge);
/* Edge_iedge. Note that even though not all the iedges are involved (e.g.,
* truncated edges are not), we use the ranks determined by the Edge/edge grids.
* Same for 2-d and 3-d. */
/* lower rank */
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, 0, box, myproc, &entry);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, 0);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(fbox), &jlower);
/* upper rank */
part= nparts-1;
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &iupper);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, nvars-1);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, hypre_BoxArraySize(fboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, nvars-1,
hypre_BoxArraySize(fboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(fbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Edge_iedge);
HYPRE_IJMatrixSetObjectType(Edge_iedge, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(Edge_iedge);
/* edge_Edge. Same for 2-d and 3-d. */
/* lower rank */
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, 0, box, myproc, &entry);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, 0);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(fbox), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &jlower);
/* upper rank */
part= nparts-1;
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, nvars-1);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, hypre_BoxArraySize(fboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, nvars-1,
hypre_BoxArraySize(fboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(fbox), &iupper);
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &edge_Edge);
HYPRE_IJMatrixSetObjectType(edge_Edge, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(edge_Edge);
/* Face_iedge. Only needed in 3-d. */
if (ndim == 3)
{
/* lower rank */
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_face, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, 0, box, myproc, &entry);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, 0);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(fbox), &jlower);
/* upper rank */
part= nparts-1;
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_face, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &iupper);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_fgrid);
var_fgrid= hypre_SStructPGridSGrid(p_fgrid, nvars-1);
fboxes = hypre_StructGridBoxes(var_fgrid);
fbox = hypre_BoxArrayBox(fboxes, hypre_BoxArraySize(fboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, nvars-1,
hypre_BoxArraySize(fboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(fbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Face_iedge);
HYPRE_IJMatrixSetObjectType(Face_iedge, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(Face_iedge);
}
/* Element_Face. Only for 3-d since Element_Edge= Element_Face in 2-d. */
/* lower rank */
if (ndim == 3)
{
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, 0, box,
myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_face, part, 0, box,
myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &jlower);
/* upper rank */
part= nparts-1;
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &iupper);
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_face, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Element_Face);
HYPRE_IJMatrixSetObjectType(Element_Face, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(Element_Face);
}
/* Element_Edge. Same for 2-d and 3-d. */
/* lower rank */
part= 0;
box = 0;
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, 0, box, myproc, &entry);
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, 0);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(cbox), &jlower);
/* upper rank */
part= nparts-1;
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_element, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &iupper);
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
nvars = hypre_SStructPGridNVars(p_cgrid);
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1);
cboxes = hypre_StructGridBoxes(var_cgrid);
cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, nvars-1,
hypre_BoxArraySize(cboxes)-1, myproc, &entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Element_Edge);
HYPRE_IJMatrixSetObjectType(Element_Edge, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(Element_Edge);
/*------------------------------------------------------------------------------
* fill up the parcsr matrices.
*------------------------------------------------------------------------------*/
/* count the number of connections, i.e., the columns
* no. of interior edges per face, or no. of interior edges per cell.
* Need to distinguish between 2-d and 3-d. */
if (ndim == 3)
{
n_xFace_iedges= (rfactor[1]-1)*rfactor[2] + (rfactor[2]-1)*rfactor[1];
n_yFace_iedges= (rfactor[0]-1)*rfactor[2] + (rfactor[2]-1)*rfactor[0];
n_zFace_iedges= (rfactor[1]-1)*rfactor[0] + (rfactor[0]-1)*rfactor[1];
n_Cell_iedges = (rfactor[2]-1)*n_zFace_iedges +
rfactor[2]*(rfactor[0]-1)*(rfactor[1]-1);
nFaces_iedges = nxFaces*n_xFace_iedges + nyFaces*n_yFace_iedges +
nzFaces*n_zFace_iedges;
nElements_iedges= nElements * n_Cell_iedges;
}
else
{
n_Cell_iedges = (rfactor[0]-1)*rfactor[1] + (rfactor[1]-1)*rfactor[0];
nElements_iedges= nElements * n_Cell_iedges;
}
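/* e.g., with rfactor= {2,2,2} in 3-d: n_xFace_iedges= 1*2 + 1*2= 4
(similarly for the y and z faces) and n_Cell_iedges= 1*4 + 2*1*1= 6
interior edges per coarse cell; in 2-d, n_Cell_iedges= 1*2 + 1*2= 4. */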
if (ndim == 3)
{
iFace = hypre_CTAlloc(HYPRE_Int, nFaces);
}
iEdge = hypre_CTAlloc(HYPRE_Int, nEdges);
iElement= hypre_CTAlloc(HYPRE_Int, nElements);
/* array structures needed for forming ij_matrices */
/* Element_edge. Same for 2-d and 3-d. */
ncols_Elementedge= hypre_CTAlloc(HYPRE_Int, nElements);
for (i= 0; i< nElements; i++)
{
ncols_Elementedge[i]= n_Cell_iedges;
}
jElement_edge = hypre_CTAlloc(HYPRE_Int, nElements_iedges);
vals_Elementedge = hypre_CTAlloc(HYPRE_Real, nElements_iedges);
/*---------------------------------------------------------------------------
* Fill up the row/column ranks of Element_edge. Will need to distinguish
* between 2-d and 3-d.
* Loop over the coarse element grid
* a) Refine the coarse cell and grab the fine cells that will contain
* the fine edges.
* To obtain the correct coarse-to-fine cell index mapping, we
* map lindex to the fine cell grid and then adjust
* so that the final mapped fine cell is the one on the upper
* corner of the agglomerate. Will need to determine the fine box
* corresponding to the coarse box.
* b) loop map these fine cells and find the ranks of the fine edges.
*---------------------------------------------------------------------------*/
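/* Sketch of step a) (assuming rfactor= {2,2,2} and a fine box anchored at
the origin, so that upper_shifts= (1,1,1)): coarse cell (1,1,1) refines
to the 2x2x2 agglomerate [2,3]^3; hypre_StructMapCoarseToFine maps
(1,1,1) to (2,2,2), and adding the shift gives the upper-corner fine
cell (3,3,3). */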
nElements= 0;
nElements_iedges= 0;
for (part= 0; part< nparts; part++)
{
if (ndim == 3)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */
Edge_nvars = hypre_SStructPGridNVars(p_cgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
}
else if (ndim == 2) /* edge is a face in 2-d*/
{
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part); /* Face grid */
Face_nvars = hypre_SStructPGridNVars(p_cgrid);
Face_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
}
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part); /* ccell grid */
var_cgrid = hypre_SStructPGridCellSGrid(p_cgrid);
cboxes = hypre_StructGridBoxes(var_cgrid);
p_fgrid = hypre_SStructGridPGrid(fgrid_element, part); /* fcell grid */
var_fgrid = hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_BoxGetSize(cbox, loop_size);
hypre_CopyIndex(hypre_BoxIMin(cbox), cstart);
/* determine which fine box cbox has coarsened from. Obtained from
cfbox_mapping. */
fboxi= cfbox_mapping[part][i];
fbox = hypre_BoxArrayBox(fboxes, fboxi);
/**********************************************************************
* determine the shift to get the correct c-to-f cell index map:
* d= hypre_BoxIMin(fbox)[j]%rfactor[j]*sign(hypre_BoxIMin(fbox)[j])
* stride[j]= d-1 if d>0
* stride[j]= rfactor[j]-1+d if d<=0.
* This is upper_shifts[part][fboxi].
**********************************************************************/
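/* e.g., with rfactor[j]= 2: hypre_BoxIMin(fbox)[j]= 5 gives d= 1> 0, so
stride[j]= 0; hypre_BoxIMin(fbox)[j]= 4 gives d= 0, so stride[j]= 1. */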
hypre_ClearIndex(stride);
hypre_CopyIndex(upper_shifts[part][fboxi], stride);
/* loop over each cell and find the row rank of Element_edge and then
the column ranks of the connected fine edges. */
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,cindex,findex,entry,rank,nElements,low_index,t,hi_index,var,m,k,j,var_index,nElements_iedges) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* refined cindex to get the correct upper fine index */
hypre_StructMapCoarseToFine(cindex, zero_index, rfactor, findex);
hypre_AddIndexes(findex, stride, 3, findex);
/* Element(i,j,k) rank */
hypre_SStructGridFindBoxManEntry(cgrid_element, part, cindex, 0, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type);
iElement[nElements]= rank;
nElements++;
/* Element_iedge columns: 3-d, x_edges, y_edges, and z_edges. */
if (ndim == 3)
{
hypre_SetIndex3(low_index, findex[0]-rfactor[0]+1,
findex[1]-rfactor[1]+1,
findex[2]-rfactor[2]+1);
for (t= 0; t< Edge_nvars; t++)
{
hypre_CopyIndex(findex, hi_index);
var= Edge_vartypes[t]; /* c & f edges enumerated the same */
/* determine looping extents over the refined cells that
will have fine edges. */
switch (var)
{
case 5: /* x_edges */
{
hi_index[1]-= 1;
hi_index[2]-= 1;
break;
}
case 6: /* y_edges */
{
hi_index[0]-= 1;
hi_index[2]-= 1;
break;
}
case 7: /* z_edges */
{
hi_index[0]-= 1;
hi_index[1]-= 1;
break;
}
} /* switch (var) */
/* column ranks. */
for (m= low_index[2]; m<= hi_index[2]; m++)
{
for (k= low_index[1]; k<= hi_index[1]; k++)
{
for (j= low_index[0]; j<= hi_index[0]; j++)
{
hypre_SetIndex3(var_index, j, k, m);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jElement_edge[nElements_iedges]= rank;
nElements_iedges++;
} /* for (j= findex[0]; j<= hi_index[0]; j++) */
} /* for (k= findex[1]; k<= hi_index[1]; k++) */
} /* for (m= findex[2]; m<= hi_index[2]; m++) */
} /* for (t= 0; t< Edge_nvars; t++) */
} /* if (ndim == 3) */
else if (ndim == 2) /* only x & y faces */
{
hypre_SetIndex3(low_index, findex[0]-rfactor[0]+1,
findex[1]-rfactor[1]+1,
findex[2]);
for (t= 0; t< Face_nvars; t++)
{
hypre_CopyIndex(findex, hi_index);
var= Face_vartypes[t]; /* c & f faces enumerated the same */
switch (var) /* note: hi_index computed differently in 2-d */
{
case 2: /* x_faces */
{
hi_index[0]-= 1;
break;
}
case 3: /* y_faces */
{
hi_index[1]-= 1;
break;
}
} /* switch (var) */
/* column ranks. */
for (k= low_index[1]; k<= hi_index[1]; k++)
{
for (j= low_index[0]; j<= hi_index[0]; j++)
{
hypre_SetIndex3(var_index, j, k, findex[2]);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jElement_edge[nElements_iedges]= rank;
nElements_iedges++;
} /* for (j= findex[0]; j<= hi_index[0]; j++) */
} /* for (k= findex[1]; k<= hi_index[1]; k++) */
} /* for (t= 0; t< Face_nvars; t++) */
} /* if (ndim == 2) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
} /* for (part= 0; part< nparts; part++) */
HYPRE_IJMatrixSetValues(Element_iedge, nElements, ncols_Elementedge,
(const HYPRE_Int*) iElement, (const HYPRE_Int*) jElement_edge,
(const HYPRE_Real*) vals_Elementedge);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) Element_iedge);
hypre_TFree(ncols_Elementedge);
hypre_TFree(jElement_edge);
hypre_TFree(vals_Elementedge);
/* Face_edge */
/*------------------------------------------------------------------------------
* Fill out Face_edge a row at a time. Since we have different Face types
* so that the size of the cols change depending on what type the Face
* is, we need to loop over the grids and take a count of the col elements.
* Loop over the coarse face grids and add up the number of interior edges.
* Will compute only for 3-d. In 2-d, these structures are obtained for
* Edge_edge.
*------------------------------------------------------------------------------*/
if (ndim == 3)
{
ncols_Faceedge = hypre_CTAlloc(HYPRE_Int, nFaces);
nFaces= 0;
j= 0;
for (part= 0; part< nparts; part++)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part); /* Face grid */
Face_nvars = hypre_SStructPGridNVars(p_cgrid);
Face_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid = hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
for (t= 0; t< Face_nvars; t++)
{
var= Face_vartypes[t];
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, t);
k= hypre_StructGridLocalSize(var_cgrid);
switch(var)
{
case 2: /* x_Faces (i,j,k) then (i-1,j,k), contain y,z edges */
{
for (i= 0; i< k; i++)
{
/* y_iedge connections to x_Face */
ncols_Faceedge[nFaces]= (rfactor[2]-1)*rfactor[1];
/* z_iedge connections to x_Face */
ncols_Faceedge[nFaces]+= rfactor[2]*(rfactor[1]-1);
j+= ncols_Faceedge[nFaces];
nFaces++;
}
break;
} /* case 2 */
case 3: /* y_Faces (i,j,k) then (i,j-1,k), contain x,z edges */
{
for (i= 0; i< k; i++)
{
/* x_iedge connections to y_Face */
ncols_Faceedge[nFaces]= (rfactor[2]-1)*rfactor[0];
/* z_iedge connections to y_Face */
ncols_Faceedge[nFaces]+= rfactor[2]*(rfactor[0]-1);
j+= ncols_Faceedge[nFaces];
nFaces++;
}
break;
} /* case 3 */
case 4: /* z_Faces (i,j,k) then (i,j,k-1), contain x,y edges */
{
for (i= 0; i< k; i++)
{
/* x_iedge connections to z_Face */
ncols_Faceedge[nFaces]= (rfactor[1]-1)*rfactor[0];
/* y_iedge connections to z_Face */
ncols_Faceedge[nFaces]+= rfactor[1]*(rfactor[0]-1);
j+= ncols_Faceedge[nFaces];
nFaces++;
}
break;
} /* case 4 */
} /* switch(var) */
} /* for (t= 0; t< Face_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
jFace_edge= hypre_CTAlloc(HYPRE_Int, j);
vals_Faceedge= hypre_CTAlloc(HYPRE_Real, j);
for (i= 0; i< j; i++)
{
vals_Faceedge[i]= 1.0;
}
/*---------------------------------------------------------------------------
* Fill up the row/column ranks of Face_edge.
* Loop over the coarse Cell grid
* a) for each Cell box, stretch to a Face box
* b) for each coarse face, if it is on the proc, map it to a
* coarse cell (add the variable offset).
* c) refine the coarse cell and grab the fine cells that will contain
* the fine edges. Refining requires a shifting dependent on the
* beginning index of the fine box.
* d) map these fine cells to the fine edges.
*---------------------------------------------------------------------------*/
nFaces = 0;
nFaces_iedges= 0;
for (part= 0; part< nparts; part++)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part); /* Face grid */
Face_nvars = hypre_SStructPGridNVars(p_cgrid);
Face_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
for (t= 0; t< Face_nvars; t++)
{
var= Face_vartypes[t];
var_cgrid = hypre_SStructPGridCellSGrid(p_cgrid);
cboxes = hypre_StructGridBoxes(var_cgrid);
/* to eliminate comparisons, take the switch outside of the loop. */
switch(var)
{
case 2: /* x_Faces-> y_iedges, z_iedges */
{
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_CopyBox(cbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* determine which fine box cbox has coarsened from */
fboxi= cfbox_mapping[part][i];
fbox = hypre_BoxArrayBox(fboxes, fboxi);
/**********************************************************
* determine the shift to get the correct c-to-f cell
* index map. This is upper_shifts[part][fboxi].
**********************************************************/
hypre_ClearIndex(stride);
hypre_CopyIndex(upper_shifts[part][fboxi], stride);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,cindex,entry,rank,nFaces,cell_index,findex,j,ilower,k,var_index,nFaces_iedges) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type);
/* check if rank on proc before continuing */
if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var]))
{
iFace[nFaces]= rank;
nFaces++;
/* transform face index to cell index */
hypre_AddIndexes(cindex, varoffsets[var], 3, cell_index);
/* Refine the coarse cell to the upper fine index. The face will
be on the "lower end" fine cells, i.e., the slab box starting
with fine index cell_index. The fine edges will be on the
lower x of the fine cell, e.g., with fine cell (i,j,k),
y_iedge (i-1,j,k) & z_iedge (i-1,j,k). */
hypre_StructMapCoarseToFine(cell_index, zero_index,
rfactor, findex);
hypre_AddIndexes(findex, stride, 3, findex);
/* cell_index was refined to the upper fine index. Shift
back to the lower end, subtract (rfactor-1). */
for (j= 0; j< ndim; j++)
{
findex[j]-= rfactor[j]-1;
}
/* y_iedges */
ilower= findex[0]-1;
for (k= 0; k< rfactor[2]-1; k++)
{
for (j= 0; j< rfactor[1]; j++)
{
hypre_SetIndex3(var_index, ilower, j+findex[1], k+findex[2]);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
/* z_iedges */
for (k= 0; k< rfactor[2]; k++)
{
for (j= 0; j< rfactor[1]-1; j++)
{
hypre_SetIndex3(var_index, ilower, j+findex[1], k+findex[2]);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
} /* if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var])) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
break;
} /* case 2: x_Faces-> y_iedges, z_iedges */
case 3: /* y_Faces-> x_iedges, z_iedges */
{
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_CopyBox(cbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* determine which fine box cbox has coarsened from */
fboxi= cfbox_mapping[part][i];
fbox = hypre_BoxArrayBox(fboxes, fboxi);
/**********************************************************
* determine the shift to get the correct c-to-f cell
* index map. This is upper_shifts[part][fboxi].
**********************************************************/
hypre_ClearIndex(stride);
hypre_CopyIndex(upper_shifts[part][fboxi], stride);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,cindex,entry,rank,nFaces,cell_index,findex,j,ilower,k,var_index,nFaces_iedges) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type);
/* check if rank on proc before continuing */
if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var]))
{
iFace[nFaces]= rank;
nFaces++;
/* transform face index to cell index */
hypre_AddIndexes(cindex, varoffsets[var], 3, cell_index);
/* Refine the coarse cell to the upper fine index. The face will
be on the "lower end" fine cells, i.e., the slab box starting
with fine index cell_index. The fine edges will be on the
lower x of the fine cell, e.g., with fine cell (i,j,k),
y_iedge (i-1,j,k) & z_iedge (i-1,j,k). */
hypre_StructMapCoarseToFine(cell_index, zero_index,
rfactor, findex);
hypre_AddIndexes(findex, stride, 3, findex);
/* cell_index is refined to the upper fine index. Shift
back to the lower end, subtract (rfactor-1). */
for (j= 0; j< ndim; j++)
{
findex[j]-= rfactor[j]-1;
}
/* x_iedges */
ilower= findex[1]-1;
for (k= 0; k< rfactor[2]-1; k++)
{
for (j= 0; j< rfactor[0]; j++)
{
hypre_SetIndex3(var_index, j+findex[0], ilower, k+findex[2]);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
/* z_iedges */
for (k= 0; k< rfactor[2]; k++)
{
for (j= 0; j< rfactor[0]-1; j++)
{
hypre_SetIndex3(var_index, j+findex[0], ilower, k+findex[2]);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
} /* if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var])) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
break;
} /* case 3: y_Faces-> x_iedges, z_iedges */
case 4: /* z_Faces-> x_iedges, y_iedges */
{
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_CopyBox(cbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* determine which fine box cbox has coarsened from */
fboxi= cfbox_mapping[part][i];
fbox = hypre_BoxArrayBox(fboxes, fboxi);
/**********************************************************
* determine the shift to get the correct c-to-f cell
* index map. This is upper_shifts[part][fboxi].
**********************************************************/
hypre_ClearIndex(stride);
hypre_CopyIndex(upper_shifts[part][fboxi], stride);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,entry,rank,nFaces,cell_index,findex,j,ilower,k,var_index,nFaces_iedges) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type);
/* check if rank on proc before continuing */
if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var]))
{
iFace[nFaces]= rank;
nFaces++;
/* transform face index to cell index */
hypre_AddIndexes(cindex, varoffsets[var], 3, cell_index);
/* Refine the coarse cell to the upper fine index. The face will
be on the "lower end" fine cells, i.e., the slab box starting
with fine index cell_index. The fine edges will be on the
lower x of the fine cell, e.g., with fine cell (i,j,k),
y_iedge (i-1,j,k) & z_iedge (i-1,j,k). */
hypre_StructMapCoarseToFine(cell_index, zero_index,
rfactor, findex);
hypre_AddIndexes(findex, stride, 3, findex);
/* cell_index is refined to the upper fine index. Shift
back to the lower end, subtract (rfactor-1). */
for (j= 0; j< ndim; j++)
{
findex[j]-= rfactor[j]-1;
}
/* x_iedges */
ilower= findex[2]-1;
for (k= 0; k< rfactor[1]-1; k++)
{
for (j= 0; j< rfactor[0]; j++)
{
hypre_SetIndex3(var_index, j+findex[0], k+findex[1], ilower);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
/* y_iedges */
for (k= 0; k< rfactor[1]; k++)
{
for (j= 0; j< rfactor[0]-1; j++)
{
hypre_SetIndex3(var_index, j+findex[0], k+findex[1], ilower);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jFace_edge[nFaces_iedges]= rank;
nFaces_iedges++;
}
}
} /* if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var])) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
break;
} /* case 4: z_Faces-> x_iedges, y_iedges */
} /* switch(var) */
} /* for (t= 0; t< Face_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
HYPRE_IJMatrixSetValues(Face_iedge, nFaces, ncols_Faceedge,
(const HYPRE_Int*) iFace, (const HYPRE_Int*) jFace_edge,
(const HYPRE_Real*) vals_Faceedge);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) Face_iedge);
hypre_TFree(ncols_Faceedge);
hypre_TFree(iFace);
hypre_TFree(jFace_edge);
hypre_TFree(vals_Faceedge);
} /* if (ndim == 3) */
/* Edge_edge */
/*------------------------------------------------------------------------------
* Count the Edge_edge connections. Will need to distinguish 2-d and 3-d.
*------------------------------------------------------------------------------*/
/* nEdges should be correct for 2-d & 3-d */
ncols_Edgeiedge= hypre_CTAlloc(HYPRE_Int, nEdges);
nEdges= 0;
k= 0;
for (part= 0; part< nparts; part++)
{
/* Edge grid. In 2-d this will be the face grid, which is assumed to be
in cgrid_edge. */
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
Edge_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
Edge_nvars = hypre_SStructPGridNVars(p_cgrid);
for (t= 0; t< Edge_nvars; t++)
{
var= Edge_vartypes[t];
var_cgrid= hypre_SStructPGridSGrid(p_cgrid, t);
j= hypre_StructGridLocalSize(var_cgrid);
switch(var)
{
case 2: /* 2-d, x_Face */
{
m= rfactor[1];
break;
}
case 3: /* 2-d, y_Face */
{
m= rfactor[0];
break;
}
case 5: /* 3-d, x_Edge */
{
m= rfactor[0];
break;
}
case 6: /* 3-d, y_Edge */
{
m= rfactor[1];
break;
}
case 7: /* 3-d, z_Edge */
{
m= rfactor[2];
break;
}
}
for (i= nEdges; i< nEdges+j; i++) /* fill in the column size for Edge */
{
ncols_Edgeiedge[i]= m;
k+= m;
}
nEdges+= j;
} /* for (t= 0; t< Edge_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
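/* k now totals the Edge_iedge nonzeros: every coarse Edge row receives m
fine iedge columns, where m is the rfactor component chosen in the switch
above, so jEdge_iedge and vals_Edgeiedge are allocated with size k. */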
jEdge_iedge= hypre_CTAlloc(HYPRE_Int, k);
vals_Edgeiedge= hypre_CTAlloc(HYPRE_Real, k);
for (i= 0; i< k; i++)
{
vals_Edgeiedge[i]= 1.0;
}
/*---------------------------------------------------------------------------
* Fill up the row/column ranks of Edge_edge. Since a refinement of the
* coarse edge index does not get the correct fine edge index, we need to
* map it to the cell grid. Recall that all variable grids are obtained by
* coarsening a cell-centered grid.
* Loop over the coarse Cell grid
* a) for each Cell box, map to an Edge box
* b) for each coarse Edge on my proc, map it to a coarse cell
* (add the variable offset).
* c) refine the coarse cell and grab the fine cells that will contain
* the fine edges.
* d) map these fine cells to the fine edges.
*---------------------------------------------------------------------------*/
nEdges = 0;
nEdges_iedges= 0;
for (part= 0; part< nparts; part++)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part);
Edge_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
Edge_nvars = hypre_SStructPGridNVars(p_cgrid);
p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid = hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
for (t= 0; t< Edge_nvars; t++)
{
var= Edge_vartypes[t];
var_cgrid= hypre_SStructPGridCellSGrid(p_cgrid);
cboxes = hypre_StructGridBoxes(var_cgrid);
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
/*-------------------------------------------------------------------
* extract the variable box by offsetting with var_offset. Note that
* this may lead to a bigger variable domain than is on this proc.
* Off-proc Edges will be checked to eliminate this problem.
*-------------------------------------------------------------------*/
hypre_CopyBox(cbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* determine which fine box cbox has coarsened from */
fboxi= cfbox_mapping[part][i];
fbox = hypre_BoxArrayBox(fboxes, fboxi);
/**********************************************************
* determine the shift to get the correct c-to-f cell
* index map. This is upper_shifts[part][fboxi].
**********************************************************/
hypre_ClearIndex(stride);
hypre_CopyIndex(upper_shifts[part][fboxi], stride);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,entry,rank,nEdges,cell_index,findex,j,var_index,m,nEdges_iedges) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
/* row rank */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
/* check if rank on proc before continuing */
if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var]))
{
iEdge[nEdges]= rank;
nEdges++;
hypre_AddIndexes(cindex, varoffsets[var], 3, cell_index);
/* refine cindex and then map back to variable index */
hypre_StructMapCoarseToFine(cell_index, zero_index, rfactor,
findex);
hypre_AddIndexes(findex, stride, 3, findex);
/* cell_index is refined to the upper fine index. Shift
back to the lower end, subtract (rfactor-1). */
for (j= 0; j< ndim; j++)
{
findex[j]-= rfactor[j]-1;
}
hypre_SubtractIndexes(findex, varoffsets[var], 3, var_index);
switch(var)
{
case 2: /* 2-d, x_face */
{
for (m= 0; m< rfactor[1]; m++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part,
var_index, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jEdge_iedge[nEdges_iedges]= rank;
nEdges_iedges++;
/* increment the x component to get the next one in the
refinement cell. */
var_index[1]++;
}
break;
}
case 3: /* 2-d, y_face */
{
for (m= 0; m< rfactor[0]; m++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part,
var_index, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jEdge_iedge[nEdges_iedges]= rank;
nEdges_iedges++;
/* increment the y component to get the next one in the
refinement cell. */
var_index[0]++;
}
break;
}
case 5: /* 3-d, x_edge */
{
for (m= 0; m< rfactor[0]; m++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part,
var_index, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jEdge_iedge[nEdges_iedges]= rank;
nEdges_iedges++;
/* increment the x component to get the next one in the
refinement cell. */
var_index[0]++;
}
break;
}
case 6: /* 3-d, y_edge */
{
for (m= 0; m< rfactor[1]; m++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part,
var_index, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jEdge_iedge[nEdges_iedges]= rank;
nEdges_iedges++;
/* increment the y component to get the next one in the
refinement cell. */
var_index[1]++;
}
break;
}
case 7: /* 3-d, z_edge */
{
for (m= 0; m< rfactor[2]; m++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part,
var_index, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index,
&rank, matrix_type);
jEdge_iedge[nEdges_iedges]= rank;
nEdges_iedges++;
/* increment the z component to get the next one in the
refinement cell. */
var_index[2]++;
}
break;
}
} /* switch(var) */
} /* if ((rank <= cupper_ranks[part][var]) &&
(rank >= clower_ranks[part][var])) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
} /* for (t= 0; t< Edge_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
HYPRE_IJMatrixSetValues(Edge_iedge, nEdges, ncols_Edgeiedge,
(const HYPRE_Int*) iEdge, (const HYPRE_Int*) jEdge_iedge,
(const HYPRE_Real*) vals_Edgeiedge);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) Edge_iedge);
hypre_TFree(ncols_Edgeiedge);
hypre_TFree(iEdge);
hypre_TFree(jEdge_iedge);
hypre_TFree(vals_Edgeiedge);
/* Element_Face & Element_Edge. Element_Face only for 3-d. */
if (ndim == 3)
{
ncols_ElementFace= hypre_CTAlloc(HYPRE_Int, nElements);
j= 2*ndim;
for (i= 0; i< nElements; i++)
{
ncols_ElementFace[i]= j; /* 3-dim -> 6 */
}
j*= nElements;
jElement_Face = hypre_CTAlloc(HYPRE_Int, j);
vals_ElementFace= hypre_CTAlloc(HYPRE_Real, j);
for (i= 0; i< j; i++)
{
vals_ElementFace[i]= 1.0;
}
}
ncols_ElementEdge= hypre_CTAlloc(HYPRE_Int, nElements);
j= 2*ndim;
k= (ndim-1)*j;
for (i= 0; i< nElements; i++)
{
ncols_ElementEdge[i]= k; /* 2-dim -> 4; 3-dim -> 12 */
}
k*= nElements;
jElement_Edge = hypre_CTAlloc(HYPRE_Int, k);
vals_ElementEdge= hypre_CTAlloc(HYPRE_Real, k);
for (i= 0; i< k; i++)
{
vals_ElementEdge[i]= 1.0;
}
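/* Consistency of the counts above: an element has 2*ndim faces (6 in 3-d)
and 2*ndim*(ndim-1) edges (4 in 2-d, 12 in 3-d), matching ncols_ElementFace
and ncols_ElementEdge. */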
/*---------------------------------------------------------------------------
* Fill up the column ranks of Element_Face and Element_Edge. Note that
* iElement has already been formed when filling Element_edge.
*---------------------------------------------------------------------------*/
nElements_Faces= 0;
nElements_Edges= 0;
for (part= 0; part< nparts; part++)
{
/* grab the nvars & vartypes for the face and edge variables */
if (ndim == 3)
{
p_cgrid = hypre_SStructGridPGrid(cgrid_face, part);
Face_nvars = hypre_SStructPGridNVars(p_cgrid);
Face_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
}
p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */
Edge_nvars = hypre_SStructPGridNVars(p_cgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_cgrid);
p_cgrid = hypre_SStructGridPGrid(cgrid_element, part); /* cell grid */
var_cgrid = hypre_SStructPGridCellSGrid(p_cgrid);
cboxes = hypre_StructGridBoxes(var_cgrid);
if (ndim == 3)
{
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_BoxGetSize(cbox, loop_size);
hypre_CopyIndex(hypre_BoxIMin(cbox), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,cindex,t,var,entry,rank,nElements_Faces,var_index) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
/*-------------------------------------------------------------
* jElement_Face: (i,j,k) then (i-1,j,k), (i,j-1,k), (i,j,k-1).
*-------------------------------------------------------------*/
for (t= 0; t< Face_nvars; t++)
{
var= Face_vartypes[t];
hypre_SStructGridFindBoxManEntry(cgrid_face, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Face[nElements_Faces]= rank;
nElements_Faces++;
hypre_SubtractIndexes(cindex, varoffsets[var], 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_face, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Face[nElements_Faces]= rank;
nElements_Faces++;
}
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
} /* if (ndim == 3) */
/*-------------------------------------------------------------------
* jElement_Edge:
* 3-dim
* x_Edge: (i,j,k) then (i,j-1,k), (i,j-1,k-1), (i,j,k-1)
* y_Edge: (i,j,k) then (i-1,j,k), (i-1,j,k-1), (i,j,k-1)
* z_Edge: (i,j,k) then (i,j-1,k), (i-1,j-1,k), (i-1,j,k)
*
* 2-dim
* x_Edge or x_Face: (i,j) then (i-1,j)
* y_Edge or y_Face: (i,j) then (i,j-1)
*-------------------------------------------------------------------*/
hypre_ForBoxI(i, cboxes)
{
cbox= hypre_BoxArrayBox(cboxes, i);
hypre_BoxGetSize(cbox, loop_size);
hypre_CopyIndex(hypre_BoxIMin(cbox), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,cindex,t,var,entry,rank,nElements_Edges,var_index) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(cindex, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(cindex, start, 3, cindex);
for (t= 0; t< Edge_nvars; t++)
{
/* Edge (i,j,k) */
var= Edge_vartypes[t];
switch (var)
{
case 2: /* x_Face= {(i,j), (i-1,j)} */
{
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
break;
}
case 3: /* y_Face= {(i,j), (i,j-1)} */
{
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
break;
}
case 5: /* "/" x_Edge={(i,j,k),(i,j-1,k),(i,j-1,k-1),(i,j,k-1)} */
{
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_AddIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
break;
}
case 6: /* "-" y_Edge={(i,j,k),(i-1,j,k),(i-1,j,k-1),(i,j,k-1)}*/
{
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
break;
}
case 7: /* "|" z_Edge={(i,j,k),(i,j-1,k),(i-1,j-1,k),(i-1,j,k)}*/
{
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_SubtractIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
hypre_AddIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jElement_Edge[nElements_Edges]= rank;
nElements_Edges++;
break;
}
} /* switch (var) */
} /* for (t= 0; t< Edge_nvars; t++) */
}
hypre_BoxLoop0End();
} /* hypre_ForBoxI(i, cboxes) */
} /* for (part= 0; part< nparts; part++) */
if (ndim == 3)
{
HYPRE_IJMatrixSetValues(Element_Face, nElements, ncols_ElementFace,
(const HYPRE_Int*) iElement, (const HYPRE_Int*) jElement_Face,
(const HYPRE_Real*) vals_ElementFace);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) Element_Face);
hypre_TFree(ncols_ElementFace);
hypre_TFree(jElement_Face);
hypre_TFree(vals_ElementFace);
} /* if (ndim == 3) */
HYPRE_IJMatrixSetValues(Element_Edge, nElements, ncols_ElementEdge,
(const HYPRE_Int*) iElement, (const HYPRE_Int*) jElement_Edge,
(const HYPRE_Real*) vals_ElementEdge);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) Element_Edge);
hypre_TFree(ncols_ElementEdge);
hypre_TFree(iElement);
hypre_TFree(jElement_Edge);
hypre_TFree(vals_ElementEdge);
/*-----------------------------------------------------------------------
* edge_Edge, the actual interpolation matrix.
* For each fine edge row, we need to know if it is an edge,
* boundary edge, or face edge. Knowing this allows us to determine the
* structure and weights of the interpolation matrix.
*
* Scheme: A. Loop over contracted boxes of the fine edge grid.
* For each fine edge ijk,
* 1) map it to a fine cell with the fine edge at the lower end
* of the box, e.g., x_edge[ijk] -> cell[i,j+1,k+1].
* 2) coarsen the fine cell to obtain a coarse cell. Determine the
* location of the fine edge with respect to the coarse edges
* of this cell. Coarsening needed only when determining the
* column rank.
*
* Need to distinguish between 2-d and 3-d.
*-----------------------------------------------------------------------*/
/* count the row/col connections */
iedgeEdge = hypre_CTAlloc(HYPRE_Int, nedges);
ncols_edgeEdge= hypre_CTAlloc(HYPRE_Int, nedges);
/*-----------------------------------------------------------------------
* loop first over the fedges aligning with the agglomerate coarse edges.
* Will loop over the face & interior edges separately also.
* Since the weights for these edges will be used to determine the
* weights along the face edges, we need to retrieve these computed
* weights from vals_edgeEdge. Done by keeping a pointer of size nedges
* that points to the location of the weight:
* pointer[rank of edge]= index location where weight resides.
*-----------------------------------------------------------------------*/
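/* Fine edges fall into three classes, each with a fixed column count set
below: fedges aligning with a coarse Edge get 1 column (geometric weight),
face fedges get 2 columns (3-d only), and interior fedges get 4 columns in
2-d and 12 in 3-d. */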
j= 0;
start_rank1= hypre_SStructGridStartRank(fgrid_edge);
bdryedge_location= hypre_CTAlloc(HYPRE_Int, nedges);
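/* bdryedge_location[rank - start_rank1] will record where the weights of the
fine edge with global rank "rank" begin inside vals_edgeEdge; start_rank1 is
the starting global rank of fgrid_edge on this processor, so the subtraction
yields a process-local offset. */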
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid = hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array = hypre_StructGridBoxes(var_fgrid);
n_boxoffsets= ndim-1;
boxoffset = hypre_CTAlloc(hypre_Index, n_boxoffsets);
suboffset = hypre_CTAlloc(hypre_Index, n_boxoffsets);
switch(var)
{
case 2: /* 2-d: x_face (vertical edges), stride=[rfactor[0],1,1] */
{
hypre_SetIndex3(stride, rfactor[0], 1, 1);
hypre_CopyIndex(varoffsets[2], var_index);
/* boxoffset shrink in the i direction */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 0, 0);
break;
}
case 3: /* 2-d: y_face (horizontal edges), stride=[1,rfactor[1],1] */
{
hypre_SetIndex3(stride, 1, rfactor[1], 1);
hypre_CopyIndex(varoffsets[3], var_index);
/* boxoffset shrink in the j direction */
hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0);
hypre_SetIndex3(suboffset[0], 0, 1, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 0, 1, 0);
break;
}
case 5: /* 3-d: x_edge, stride=[1,rfactor[1],rfactor[2]] */
{
hypre_SetIndex3(stride, 1, rfactor[1], rfactor[2]);
hypre_CopyIndex(varoffsets[5], var_index);
/* boxoffset shrink in the j & k directions */
hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0);
hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1);
hypre_SetIndex3(suboffset[0], 0, 1, 0);
hypre_SetIndex3(suboffset[1], 0, 0, 1);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 0, 1, 1);
break;
}
case 6: /* 3-d: y_edge, stride=[rfactor[0],1,rfactor[2]] */
{
hypre_SetIndex3(stride, rfactor[0], 1, rfactor[2]);
hypre_CopyIndex(varoffsets[6], var_index);
/* boxoffset shrink in the i & k directions */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
hypre_SetIndex3(suboffset[1], 0, 0, 1);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 0, 1);
break;
}
case 7: /* 3-d: z_edge, stride=[rfactor[0],rfactor[1],1] */
{
hypre_SetIndex3(stride, rfactor[0], rfactor[1], 1);
hypre_CopyIndex(varoffsets[7], var_index);
/* boxoffset shrink in the i & j directions */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(boxoffset[1], 0, rfactor[1]-1, 0);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
hypre_SetIndex3(suboffset[1], 0, 1, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 1, 0);
break;
}
}
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
/* adjust the project cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/* the adjusted variable box may be bigger than the actual
variable box: variables that are shared may lead to smaller
variable boxes than the SubtractIndexes produces. If the box
has to be decreased, then we decrease it by (rfactor[j]-1)
in the appropriate direction.
Check the location of the shifted lower box index. */
for (k= 0; k< n_boxoffsets; k++)
{
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), suboffset[k], 3,
findex);
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[k], 3,
hypre_BoxIMin(&copy_box));
}
}
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, stride,
loop_size);
/* extend the loop_size so that the upper boundary of the box is reached. */
hypre_AddIndexes(loop_size, hi_index, 3, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, stride, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= stride[k];
}
hypre_AddIndexes(findex, start, 3, findex);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &rank, matrix_type);
/* the row may still lie outside this processor; check before continuing */
if ((rank <= fupper_ranks[part][var]) && (rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
ncols_edgeEdge[j]= 1;
bdryedge_location[rank-start_rank1]= j;
j++;
}
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI */
hypre_TFree(boxoffset);
hypre_TFree(suboffset);
} /* for (t= 0; t< nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
/*-----------------------------------------------------------------------
* Record the row ranks for the face edges. Only for 3-d.
*
* Loop over the face edges.
* Since the weights for these edges will be used to determine the
* weights along the face edges, we need to retrieve these computed
* weights from vals_edgeEdge. Done by keeping a pointer of size nedges
* that points to the location of the weight:
* pointer[rank of edge]= index location where weight resides.
*-----------------------------------------------------------------------*/
if (ndim == 3)
{
l= j;
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
/* may need to shrink a given box in some boxoffset directions */
boxoffset= hypre_TAlloc(hypre_Index, ndim);
for (t= 0; t< ndim; t++)
{
hypre_ClearIndex(boxoffset[t]);
hypre_IndexD(boxoffset[t], t)= rfactor[t]-1;
}
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array= hypre_StructGridBoxes(var_fgrid);
/* to reduce comparisons, take the switch outside of the loop */
switch(var)
{
case 5:
{
/* 3-d x_edge, can be Y or Z_Face */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
/* adjust the contracted cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* x_edge-> Z_Face & Y_Face:
* Z_Face- contract in the z direction only if the
* processor interface is in the z direction
* Y_Face- contract in the y direction if the processor
* interface is in the y direction.
******************************************************/
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the Z plane direction */
loop_size[2]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/************************************************************
* Loop over the Z_Face x_edges.
************************************************************/
for (p= 0; p< rfactor[0]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[0]+= p;
for (n= 1; n< rfactor[1]; n++)
{
var_index[1]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* the row rank may still lie outside this processor */
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* Z_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2; /* two weight values */
}
} /* for (n= 1; n< rfactor[1]; n++) */
} /* for (p= 0; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(m);
/* Y_Face */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[1]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j,l) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/************************************************************
* Loop over the Y_Face x_edges.
************************************************************/
for (p= 0; p< rfactor[0]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[0]+= p;
for (n= 1; n< rfactor[2]; n++)
{
var_index[2]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* Y_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2;
}
} /* for (n= 1; n< rfactor[2]; n++) */
} /* for (p= 0; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 6:
{
/* 3-d y_edge, can be X or Z_Face */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* y_edge-> X_Face & Z_Face:
* Z_Face- contract in the z direction only if the
* processor interface is in the z direction
* X_Face- contract in the x direction if the processor
* interface is in the x direction.
******************************************************/
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the Z_Face direction */
loop_size[2]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j,l) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* Z_Face */
for (p= 0; p< rfactor[1]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* Z_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2;
}
} /* for (n= 1; n< rfactor[0]; n++) */
} /* for (p= 0; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
/* X_Face */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[0]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j,l) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/*****************************************************
* Loop over the X_Face y_edges.
*****************************************************/
for (p= 0; p< rfactor[1]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 1; n< rfactor[2]; n++)
{
var_index[2]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* X_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2;
}
} /* for (n= 1; n< rfactor[2]; n++) */
} /* for (p= 0; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 7:
{
/* 3-d z_edge, can be interior, X or Y_Face, or Z_Edge */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* z_edge-> X_Face & Y_Face:
* X_Face- contract in the x direction if the processor
* interface is in the x direction.
* Y_Face- contract in the y direction if the processor
* interface is in the y direction.
******************************************************/
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the X_Face direction */
loop_size[0]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j,l) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/******************************************************
* Loop over the X_Face z_edges.
******************************************************/
for (p= 0; p< rfactor[2]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[1]; n++)
{
var_index[1]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* X_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2;
}
} /* for (n= 1; n< rfactor[1]; n++) */
} /* for (p= 0; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
/* Y_Face */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3,
hypre_BoxIMin(&copy_box));
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[1]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j,l) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/****************************************************
* Loop over the Y_Face z_edges.
****************************************************/
for (p= 0; p< rfactor[2]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
if ((rank <= fupper_ranks[part][var]) &&
(rank >= flower_ranks[part][var]))
{
iedgeEdge[j]= rank;
/* Y_Face. Two coarse Edge connections. */
ncols_edgeEdge[j]= 2;
j++;
/* record index location */
bdryedge_location[rank-start_rank1]= l;
l+= 2;
}
} /* for (n= 1; n< rfactor[0]; n++) */
} /* for (p= 0; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
} /* switch */
} /* for (t= 0; t< Edge_nvars; t++) */
hypre_TFree(boxoffset);
} /* for (part= 0; part< nparts; part++) */
} /* if (ndim == 3) */
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array= hypre_StructGridBoxes(var_fgrid);
/* to reduce comparisons, take the switch outside of the loop */
switch(var)
{
case 2:
{
/* 2-d x_face = x_edge, can be interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
/* adjust the contracted cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* get interior edges */
for (p= 1; p< rfactor[0]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[0]+= p;
for (n= 0; n< rfactor[1]; n++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
iedgeEdge[j]= rank;
/* lies in the interior of a Face. Four coarse Edge connections. */
ncols_edgeEdge[j]= 4;
j++;
var_index[1]++;
} /* for (n= 0; n< rfactor[1]; n++) */
} /* for (p= 1; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 3:
{
/* 2-d y_face = y_edge, can be interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* get interior edges */
for (p= 1; p< rfactor[1]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 0; n< rfactor[0]; n++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
iedgeEdge[j]= rank;
/* lies in the interior of a Face. Four coarse Edge connections. */
ncols_edgeEdge[j]= 4;
j++;
var_index[0]++;
} /* for (n= 0; n< rfactor[0]; n++) */
} /* for (p= 1; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 5:
{
/* 3-d x_edge, can be only interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* get interior edges */
for (p= 1; p< rfactor[2]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[1]; n++)
{
var_index[1]++;
for (k= 0; k< rfactor[0]; k++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
iedgeEdge[j]= rank;
/* Interior. Twelve coarse Edge connections. */
ncols_edgeEdge[j]= 12;
j++;
var_index[0]++;
} /* for (k= 0; k< rfactor[0]; k++) */
/* reset var_index[0] to the initial index for next k loop */
var_index[0]-= rfactor[0];
} /* for (n= 1; n< rfactor[1]; n++) */
/* reset var_index[1] to the initial index for next n loop */
var_index[1]-= (rfactor[1]-1);
} /* for (p= 1; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 6:
{
/* 3-d y_edge, can be only interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
/* adjust the contracted cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* get interior edges */
for (p= 1; p< rfactor[2]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
for (k= 0; k< rfactor[1]; k++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
iedgeEdge[j]= rank;
/* Interior. Twelve coarse Edge connections. */
ncols_edgeEdge[j]= 12;
j++;
var_index[1]++;
} /* for (k= 0; k< rfactor[1]; k++) */
/* reset var_index[1] to the initial index for next k loop */
var_index[1]-= rfactor[1];
} /* for (n= 1; n< rfactor[0]; n++) */
/* reset var_index[0] to the initial index for next n loop */
var_index[0]-= (rfactor[0]-1);
} /* for (p= 1; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 7:
{
/* 3-d z_edge, can be only interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
/* adjust the contracted cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,rank,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (k= 0; k< 3; k++)
{
findex[k]*= rfactor[k];
}
hypre_AddIndexes(findex, start, 3, findex);
/* get interior edges */
for (p= 1; p< rfactor[1]; p++)
{
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
for (k= 0; k< rfactor[2]; k++)
{
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
iedgeEdge[j]= rank;
/* Interior. Twelve coarse Edge connections. */
ncols_edgeEdge[j]= 12;
j++;
var_index[2]++;
} /* for (k= 0; k< rfactor[2]; k++) */
/* reset var_index[2] to the initial index for next k loop */
var_index[2]-= rfactor[2];
} /* for (n= 1; n< rfactor[0]; n++) */
/* reset var_index[0] to the initial index for next n loop */
var_index[0]-= (rfactor[0]-1);
} /* for (p= 1; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
} /* switch */
} /* for (t= 0; t< Edge_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
k= 0;
j= 0;
for (i= 0; i< nedges; i++)
{
if (ncols_edgeEdge[i])
{
k+= ncols_edgeEdge[i];
j++;
}
}
vals_edgeEdge = hypre_CTAlloc(HYPRE_Real, k);
jedge_Edge = hypre_CTAlloc(HYPRE_Int, k);
size1 = j;
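/* k is the total number of edge_Edge nonzeros and j the number of rows that
actually received a column count above; j is saved in size1 for later use. */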
/*********************************************************************
* Fill up the edge_Edge interpolation matrix. Interpolation weights
* are determined differently for each type of fine edges.
*
* fedge_on_CEdge: use geometric interpolation, i.e., the ratio of
* the fine to coarse edge lengths.
*
* fedge_on_agglomerate_face: box mg approach. Collapse the like
* variable stencil connections of the given face. Weighted linear
* interpolation of the fedge_on_CEdge values.
*
* fedge_in_agglomerate_interior: AMGe.
*********************************************************************/
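/* The fedge_on_CEdge weights are purely geometric: a fine edge pointing in
direction d is 1/rfactor[d] of the coarse edge it lies on, hence the
fCedge_ratio values set in the switch that follows. */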
/* loop over fedges aligning with the agglomerate coarse edges first. */
k= 0;
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array= hypre_StructGridBoxes(var_fgrid);
n_boxoffsets= ndim-1;
boxoffset = hypre_CTAlloc(hypre_Index, n_boxoffsets);
suboffset = hypre_CTAlloc(hypre_Index, n_boxoffsets);
switch(var)
{
case 2: /* 2-d: x_face (vertical edges), stride=[rfactor[0],1,1]
fCedge_ratio= 1.0/rfactor[1] */
{
hypre_SetIndex3(stride, rfactor[0], 1, 1);
fCedge_ratio= 1.0/rfactor[1];
/* boxoffset shrink in the i direction */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 0, 0);
break;
}
case 3: /* 2-d: y_face (horizontal edges), stride=[1,rfactor[1],1]
fCedge_ratio= 1.0/rfactor[0] */
{
hypre_SetIndex3(stride, 1, rfactor[1], 1);
fCedge_ratio= 1.0/rfactor[0];
/* boxoffset shrink in the j direction */
hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0);
hypre_SetIndex3(suboffset[0], 0, 1, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 0, 1, 0);
break;
}
case 5: /* 3-d: x_edge, stride=[1,rfactor[1],rfactor[2]]
fCedge_ratio= 1.0/rfactor[0] */
{
hypre_SetIndex3(stride, 1, rfactor[1], rfactor[2]);
fCedge_ratio= 1.0/rfactor[0];
/* boxoffset shrink in the j & k directions */
hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0);
hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1);
hypre_SetIndex3(suboffset[0], 0, 1, 0);
hypre_SetIndex3(suboffset[1], 0, 0, 1);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 0, 1, 1);
break;
}
case 6: /* 3-d: y_edge, stride=[rfactor[0],1,rfactor[2]]
fCedge_ratio= 1.0/rfactor[1] */
{
hypre_SetIndex3(stride, rfactor[0], 1, rfactor[2]);
fCedge_ratio= 1.0/rfactor[1];
/* boxoffset shrink in the i & k directions */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
hypre_SetIndex3(suboffset[1], 0, 0, 1);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 0, 1);
break;
}
case 7: /* 3-d: z_edge, stride=[rfactor[0],rfactor[1],1]
fCedge_ratio= 1.0/rfactor[2] */
{
hypre_SetIndex3(stride, rfactor[0], rfactor[1], 1);
fCedge_ratio= 1.0/rfactor[2];
/* boxoffset shrink in the i & j directions */
hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0);
hypre_SetIndex3(boxoffset[1], 0, rfactor[1]-1, 0);
hypre_SetIndex3(suboffset[0], 1, 0, 0);
hypre_SetIndex3(suboffset[1], 0, 1, 0);
/* extend loop_size by one in the stride direction */
hypre_SetIndex3(hi_index, 1, 1, 0);
break;
}
}
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the contracted cellbox to the variable box.
Note that some of the fboxes may be skipped because they
vanish. */
hypre_CopyBox(cellbox, &copy_box);
for (j= 0; j< n_boxoffsets; j++)
{
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), suboffset[j], 3,
findex);
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[j], 3,
hypre_BoxIMin(&copy_box));
/* also modify cstart */
hypre_AddIndexes(boxoffset[j], one_index, 3, boxoffset[j]);
hypre_StructMapFineToCoarse(boxoffset[j], zero_index, rfactor,
boxoffset[j]);
hypre_AddIndexes(cstart, boxoffset[j], 3, cstart);
}
}
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, stride,
loop_size);
/* extend the loop_size so that the upper boundary of the box is reached. */
hypre_AddIndexes(loop_size, hi_index, 3, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* note that the correct cbox corresponding to this non-vanishing
fbox is used. */
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, stride, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,j,entry,cindex,var_index,rank,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
for (j= 0; j< 3; j++)
{
findex[j]*= stride[j];
}
hypre_AddIndexes(findex, start, 3, findex);
hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &j, matrix_type);
/* the row may still lie outside this processor; check before continuing */
if ((j <= fupper_ranks[part][var]) && (j >= flower_ranks[part][var]))
{
hypre_SubtractIndexes(findex, start, 3, findex);
/* determine where the edge lies- coarsening required. */
hypre_StructMapFineToCoarse(findex, zero_index, rfactor,
cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
hypre_AddIndexes(findex, start, 3, findex);
/* lies on coarse Edge. Coarse Edge connection:
var_index= cindex - subtract_index.*/
hypre_SubtractIndexes(cindex, varoffsets[var], 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
vals_edgeEdge[k]= fCedge_ratio;
k++;
}
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI */
hypre_TFree(boxoffset);
hypre_TFree(suboffset);
} /* for (t= 0; t< nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
/* generate the face interpolation weights/info. Only for 3-d */
if (ndim == 3)
{
/* Allocate memory to arrays for the tridiagonal system & solutions.
Take the maximum size needed. */
i= rfactor[0]-1;
for (j= 1; j< ndim; j++)
{
if (i < (rfactor[j]-1))
{
i= rfactor[j]-1;
}
}
upper= hypre_CTAlloc(HYPRE_Real, i);
lower= hypre_CTAlloc(HYPRE_Real, i);
diag = hypre_CTAlloc(HYPRE_Real, i);
face_w1= hypre_CTAlloc(HYPRE_Real, i);
face_w2= hypre_CTAlloc(HYPRE_Real, i);
off_proc_flag= hypre_CTAlloc(HYPRE_Int, i+1);
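/* The tridiagonal work arrays are sized to max_j(rfactor[j]-1), the longest
strip of fine edges interior to a face in any direction. off_proc_flag gets
one extra slot since indices 1..rfactor[]-1 are used below. */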
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
/* may need to shrink a given box in some boxoffset directions */
boxoffset= hypre_TAlloc(hypre_Index, ndim);
for (t= 0; t< ndim; t++)
{
hypre_ClearIndex(boxoffset[t]);
hypre_IndexD(boxoffset[t], t)= rfactor[t]-1;
}
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array= hypre_StructGridBoxes(var_fgrid);
switch(var)
{
case 5:
{
/* 3-d x_edge, can be Y or Z_Face */
fCedge_ratio= 1.0/rfactor[0];
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* x_edge-> Z_Face & Y_Face:
* Z_Face- contract in the z direction only if the
* processor interface is in the z direction
* Y_Face- contract in the y direction if the processor
* interface is in the y direction.
******************************************************/
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, kshift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the Z plane direction */
loop_size[2]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
hypre_SubtractIndexes(cindex, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of x_edges making up the Z_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[0]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[1]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/******************************************************
* grab the already computed lower-end edge weight.
* These are bdry agglomerate wgts that are pre-determined
* so that no communication is needed.
******************************************************/
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[1]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* x_edge for Z_Face: collapse_dir= 2, stencil_dir= 1
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[0]+= p;
for (n= 1; n< rfactor[1]; n++)
{
var_index[1]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
2,
1,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[1]-2]*= -upper[rfactor[1]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[1]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[1]-1);
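/* face_w1 (face_w2) now holds the interpolation weights from the lower
(upper) coarse edge: each rhs had a single nonzero endpoint entry, so the
tridiagonal solves propagate that endpoint's influence along the strip. */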
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[1]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(m);
/* Y_Face */
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, jshift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[1]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
/******************************************************
* Y_Face. Two coarse Edge connections.
* x_Edge (i,j-1,k), (i,j-1,k-1)
******************************************************/
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of x_edges making up the Y_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[0]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[2]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[2]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* x_edge for Y_Face: collapse_dir= 1, stencil_dir= 2
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[0]+= p;
for (n= 1; n< rfactor[2]; n++)
{
var_index[2]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
1,
2,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[2]-2]*= -upper[rfactor[2]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[2]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[2]-1);
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[2]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 6:
{
/* 3-d y_edge, can be X or Z_Face */
fCedge_ratio= 1.0/rfactor[1];
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* y_edge-> X_Face & Z_Face:
* Z_Face- contract in the z direction only if the
* processor interface is in the z direction
* X_Face- contract in the x direction if the processor
* interface is in the x direction.
******************************************************/
/* Z_Face */
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, kshift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the Z plane direction */
loop_size[2]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
hypre_SubtractIndexes(cindex, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of y_edges making up the Z_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[1]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[0]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[0]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* y_edge for Z_Face: collapse_dir= 2, stencil_dir= 0
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
2,
0,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[0]-2]*= -upper[rfactor[0]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[0]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[0]-1);
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[0]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
/* X_Face */
hypre_CopyBox(cellbox, &copy_box);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, ishift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[0]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
/******************************************************
* X_Face. Two coarse Edge connections.
* y_Edge (i-1,j,k), (i-1,j,k-1)
******************************************************/
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of y_edges making up the X_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[1]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[2]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[2]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* y_edge for X_Face: collapse_dir= 0, stencil_dir= 2
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[1]+= p;
for (n= 1; n< rfactor[2]; n++)
{
var_index[2]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
0,
2,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[2]-2]*= -upper[rfactor[2]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[2]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[2]-1);
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[2]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 7:
{
/* 3-d z_edge, can be X or Y_Face */
fCedge_ratio= 1.0/rfactor[2];
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
/* vboxes inside the i'th cellbox */
num_vboxes= n_CtoVbox[part][i];
vboxnums = CtoVboxnums[part][i];
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
/******************************************************
* Check the location of the shifted lower box index:
* z_edge-> X_Face & Y_Face:
* X_Face- contract in the x direction if the processor
* interface is in the x direction.
* Y_Face- contract in the y direction if the processor
* interface is in the y direction.
******************************************************/
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, ishift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
/* increase the loop_size by one in the X plane direction */
loop_size[0]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of z_edges making up the X_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[2]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[1]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[1]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* z_edge for X_Face: collapse_dir= 0, stencil_dir= 1
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[1]; n++)
{
var_index[1]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
0,
1,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[1]-2]*= -upper[rfactor[1]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[1]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[1]-1);
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[1]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
/* Y_Face */
hypre_CopyBox(cellbox, &copy_box);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3,
findex);
/* loop over all the vboxes to see if findex is inside */
row_in= falseV;
for (p= 0; p< num_vboxes[t]; p++)
{
vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]);
if (hypre_IndexInBox(findex, vbox))
{
hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box));
row_in= trueV;
break;
}
}
/* not in any vbox */
if (!row_in)
{
hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3,
hypre_BoxIMin(&copy_box));
/* modify cstart */
hypre_AddIndexes(cstart, jshift, 3, cstart);
}
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
loop_size[1]++;
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, m);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,face_w1,face_w2,off_proc_flag,stencil_vals,lower,diag,upper,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(m)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/* because of rfactor striding, cindex= findex. But adjust
by cstart to get the actual coarse edge. */
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* Will need the actual fine indices. */
for (l= 0; l< ndim; l++)
{
findex[l]*= rfactor[l];
}
hypre_AddIndexes(findex, start, 3, findex);
/**********************************************************
* Y_Face (i,j-1,k). Two like-var coarse Edge connections.
* z_Edge (i,j-1,k), (i-1,j-1,k)
**********************************************************/
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2,
matrix_type);
hypre_SubtractIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
/* loop over the strips of z_edges making up the Y_Face and
create the tridiagonal systems by collapsing the stencils. */
for (p= 0; p< rfactor[2]; p++)
{
/* create the rhs's for the tridiagonal system. Requires
finding the ranks at the endpoints of the strip. */
for (n= 0; n< rfactor[0]-1; n++)
{
face_w1[n]= 0.0;
face_w2[n]= 0.0;
}
/* lower-end and upper-end edge weights */
face_w1[0]= fCedge_ratio;
face_w2[rfactor[0]-2]= fCedge_ratio;
/******************************************************
* create tridiagonal matrix.
* z_edge for Y_Face: collapse_dir= 1, stencil_dir= 0
******************************************************/
hypre_CopyIndex(findex, var_index);
var_index[2]+= p;
for (n= 1; n< rfactor[0]; n++)
{
var_index[0]++;
off_proc_flag[n]=
hypre_CollapseStencilToStencil(Aee,
fgrid_edge,
part,
t,
var_index,
1,
0,
&stencil_vals);
/* put extracted stencil_vals into tridiagonal matrix */
lower[n-1]= stencil_vals[0];
diag[n-1] = stencil_vals[1];
upper[n-1]= stencil_vals[2];
hypre_TFree(stencil_vals);
}
/* solve systems to get weights. Must adjust face_w's so
that the stencil entry contributes. */
face_w1[0]*= -lower[0];
face_w2[rfactor[0]-2]*= -upper[rfactor[0]-2];
hypre_TriDiagSolve(diag, upper, lower, face_w1, rfactor[0]-1);
hypre_TriDiagSolve(diag, upper, lower, face_w2, rfactor[0]-1);
/* place weights into vals_edgeEdge */
for (n= 1; n< rfactor[0]; n++)
{
if (!off_proc_flag[n]) /* off_proc_flag= 1 if offproc */
{
jedge_Edge[k]= rank;
vals_edgeEdge[k]= face_w1[n-1]; /* lower end connection */
k++;
jedge_Edge[k]= rank2;
vals_edgeEdge[k]= face_w2[n-1]; /* upper end connection */
k++;
}
}
} /* for (p= 0; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(m);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
} /* switch */
} /* for (t= 0; t< Edge_nvars; t++) */
hypre_TFree(boxoffset);
} /* for (part= 0; part< nparts; part++) */
hypre_TFree(upper);
hypre_TFree(lower);
hypre_TFree(diag);
hypre_TFree(face_w1);
hypre_TFree(face_w2);
hypre_TFree(off_proc_flag);
} /* if (ndim == 3) */
/* generate the interior interpolation weights/info */
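/* Interior fine edges connect to the surrounding coarse Edges of like and
unlike variable types (four in 2-d, twelve in 3-d). Only the column ranks
are recorded into jedge_Edge here; the corresponding vals_edgeEdge weights
are not set in these loops. */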
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid);
p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */
/* note that fboxes are the contracted CELL boxes. Will get the correct
variable grid extents. */
fboxes= contract_fedgeBoxes[part];
for (t= 0; t< Edge_nvars; t++)
{
var = Edge_vartypes[t];
var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var);
box_array= hypre_StructGridBoxes(var_fgrid);
switch(var)
{
case 2:
{
/* 2-d x_face = x_edge, can be interior or on X_Edge */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, r);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(r)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/*****************************************************
* Where the fine edge lies wrt the coarse edge:
* Since we stride by rfactor, lindex is
* the coarse index. No coarsening needed, i.e.,
* cindex= findex.
*
* Loop over the interior fine edges in an agglomerate.
*****************************************************/
for (p= 1; p< rfactor[0]; p++)
{
for (n= 0; n< rfactor[1]; n++)
{
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* interior of the Face. Extract the four coarse Edge
column ranks (x_Edge ijk & (i-1,j,k), and y_Edge ijk &
(i,j-1,k)). No weights determined. */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* y_Edges */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[3], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[3], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
} /* for (n= 0; n< rfactor[1]; n++) */
} /* for (p= 1; p< rfactor[0]; p++) */
}
hypre_BoxLoop1End(r);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 3:
{
/* 2-d y_face = y_edge, can be interior or on Y_Edge */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, r);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(r)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/*****************************************************
* Where the fine edge lies wrt the coarse edge:
* Since we stride by rfactor, lindex is
* the coarse index. No coarsening needed, i.e.,
* cindex= findex.
*
* Loop over the interior fine edges in an agglomerate.
*****************************************************/
for (p= 1; p< rfactor[1]; p++)
{
for (n= 0; n< rfactor[0]; n++)
{
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/* lies in the interior of the Face. Extract the four coarse Edge
column ranks (y_Edge ijk & (i,j-1,k), and x_Edge ijk &
(i-1,j,k)). No weights determined. */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* x_Edges */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[2], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[2], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
} /* for (n= 0; n< rfactor[0]; n++) */
} /* for (p= 1; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(r);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 5:
{
/* 3-d x_edge, must be interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, r);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(r)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/*****************************************************
* Where the fine edge lies wrt the coarse edge:
* Since we stride by rfactor, lindex is
* the coarse index. No coarsening needed, i.e.,
* cindex= findex.
*
* Loop over the interior fine edges in an agglomerate.
*****************************************************/
for (p= 1; p< rfactor[2]; p++)
{
for (n= 1; n< rfactor[1]; n++)
{
for (m= 0; m< rfactor[0]; m++)
{
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/***********************************************
* Interior.
* x_Edge ijk, (i,j-1,k), (i,j-1,k-1), (i,j,k-1)
* y_Edge ijk, (i-1,j,k), (i-1,j,k-1), (i,j,k-1)
* z_Edge ijk, (i-1,j,k), (i-1,j-1,k), (i,j-1,k)
*
* vals_edgeEdge's are not set.
***********************************************/
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* y_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* z_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
} /* for (m= 0; m< rfactor[0]; m++) */
} /* for (n= 1; n< rfactor[1]; n++) */
} /* for (p= 1; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(r);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 6:
{
/* 3-d y_edge, must be interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, r);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(r)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/*****************************************************
* Where the fine edge lies wrt the coarse edge:
* Since we stride by rfactor, lindex is
* the coarse index. No coarsening needed, i.e.,
* cindex= findex.
*
* Loop over the interior fine edges in an agglomerate.
*****************************************************/
for (p= 1; p< rfactor[2]; p++)
{
for (n= 1; n< rfactor[0]; n++)
{
for (m= 0; m< rfactor[1]; m++)
{
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/***********************************************
* Interior.
* y_Edge ijk, (i-1,j,k), (i-1,j,k-1), (i,j,k-1)
* z_Edge ijk, (i-1,j,k), (i-1,j-1,k), (i,j-1,k)
* x_Edge ijk, (i,j-1,k), (i,j-1,k-1), (i,j,k-1)
*
* vals_edgeEdge's are not set.
***********************************************/
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* z_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[7], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* x_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
} /* for (m= 0; m< rfactor[1]; m++) */
} /* for (n= 1; n< rfactor[0]; n++) */
} /* for (p= 1; p< rfactor[2]; p++) */
}
hypre_BoxLoop1End(r);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
case 7:
{
/* 3-d z_edge, only the interior */
hypre_ForBoxI(i, fboxes)
{
cellbox= hypre_BoxArrayBox(fboxes, i);
vbox = hypre_BoxArrayBox(box_array, i);
hypre_CopyIndex(Edge_cstarts[part][i], cstart);
/* adjust the projected cellbox to the variable box */
hypre_CopyBox(cellbox, &copy_box);
hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3,
hypre_BoxIMin(&copy_box));
hypre_BoxGetSize(&copy_box, loop_size);
hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor,
loop_size);
hypre_CopyIndex(hypre_BoxIMin(&copy_box), start);
hypre_BoxLoop1Begin(ndim, loop_size,
&copy_box, start, rfactor, r);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(r)
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]);
/*****************************************************
* Where the fine edge lies wrt the coarse edge:
* Since we stride by rfactor, lindex is
* the coarse index. No coarsening needed, i.e.,
* cindex= findex.
*
* Loop over the interior fine edges in an agglomerate.
*****************************************************/
for (p= 1; p< rfactor[1]; p++)
{
for (n= 1; n< rfactor[0]; n++)
{
for (m= 0; m< rfactor[2]; m++)
{
hypre_CopyIndex(findex, cindex);
hypre_AddIndexes(cindex, cstart, 3, cindex);
/*************************************************
* Interior.
* z_Edge ijk, (i-1,j,k), (i-1,j-1,k), (i,j-1,k)
* x_Edge ijk, (i,j-1,k), (i,j-1,k-1), (i,j,k-1)
* y_Edge ijk, (i-1,j,k), (i-1,j,k-1), (i,j,k-1)
*
* vals_edgeEdge's are not set.
*************************************************/
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
t, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* x_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, jshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[5], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
/* y_Edge */
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(cindex, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_SubtractIndexes(var_index, kshift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
hypre_AddIndexes(var_index, ishift, 3, var_index);
hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index,
vartype_map[6], &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank,
matrix_type);
jedge_Edge[k]= rank;
k++;
} /* for (m= 0; m< rfactor[2]; m++) */
} /* for (n= 1; n< rfactor[0]; n++) */
} /* for (p= 1; p< rfactor[1]; p++) */
}
hypre_BoxLoop1End(r);
} /* hypre_ForBoxI(i, fboxes) */
break;
}
} /* switch */
} /* for (t= 0; t< Edge_nvars; t++) */
} /* for (part= 0; part< nparts; part++) */
hypre_TFree(bdryedge_location);
HYPRE_IJMatrixSetValues(edge_Edge, size1, ncols_edgeEdge,
(const HYPRE_Int*) iedgeEdge, (const HYPRE_Int*) jedge_Edge,
(const HYPRE_Real*) vals_edgeEdge);
HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) edge_Edge);
hypre_TFree(ncols_edgeEdge);
hypre_TFree(iedgeEdge);
hypre_TFree(jedge_Edge);
hypre_TFree(vals_edgeEdge);
/* n_CtoVbox[part][cellboxi][var] & CtoVboxnums[part][cellboxi][var][nvboxes] */
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part);
Edge_nvars= hypre_SStructPGridNVars(p_fgrid);
var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
hypre_ForBoxI(j, fboxes)
{
for (t= 0; t< Edge_nvars; t++)
{
hypre_TFree(CtoVboxnums[part][j][t]);
}
hypre_TFree(n_CtoVbox[part][j]);
hypre_TFree(CtoVboxnums[part][j]);
}
hypre_TFree(n_CtoVbox[part]);
hypre_TFree(CtoVboxnums[part]);
}
hypre_TFree(n_CtoVbox);
hypre_TFree(CtoVboxnums);
for (part= 0; part< nparts; part++)
{
p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part);
var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid);
fboxes = hypre_StructGridBoxes(var_fgrid);
hypre_BoxArrayDestroy(contract_fedgeBoxes[part]);
hypre_TFree(Edge_cstarts[part]);
hypre_TFree(upper_shifts[part]);
hypre_TFree(lower_shifts[part]);
hypre_TFree(cfbox_mapping[part]);
hypre_TFree(fcbox_mapping[part]);
hypre_TFree(fupper_ranks[part]);
hypre_TFree(flower_ranks[part]);
hypre_TFree(cupper_ranks[part]);
hypre_TFree(clower_ranks[part]);
}
hypre_TFree(contract_fedgeBoxes);
hypre_TFree(Edge_cstarts);
hypre_TFree(upper_shifts);
hypre_TFree(lower_shifts);
hypre_TFree(cfbox_mapping);
hypre_TFree(fcbox_mapping);
hypre_TFree(fupper_ranks);
hypre_TFree(flower_ranks);
hypre_TFree(cupper_ranks);
hypre_TFree(clower_ranks);
hypre_TFree(varoffsets);
hypre_TFree(vartype_map);
if (ndim > 2)
{
(PTopology -> Face_iedge) = Face_iedge;
(PTopology -> Element_Face) = Element_Face;
}
(PTopology -> Element_iedge)= Element_iedge;
(PTopology -> Edge_iedge) = Edge_iedge;
(PTopology -> Element_Edge) = Element_Edge;
return edge_Edge;
}
/*--------------------------------------------------------------------------
* hypre_CollapseStencilToStencil: Collapses 3d stencil shape & values to
* a 2d 3-point stencil: collapsed_vals= [ldiag diag udiag].
* Algo:
* 1) Given the collapsing direction & the collapsed stencil pattern,
* group the ranks into three collapsed sets: diag_ranks, ldiag_ranks,
* udiag_ranks.
* 2) concatenate these sets, marking the set location
* 3) qsort the concatenated set and the col_inds
* 4) scan and compare the two sorted arrays to compute the collapsed vals.
*
* Example, suppose collapsing to y_edges. Then the new_stencil pattern
* is [n c s]^t and we need to collapse in the x direction to get this
* 3-pt stencil: collapse_dir= 0 & new_stencil_dir= 1.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CollapseStencilToStencil(hypre_ParCSRMatrix *Aee,
hypre_SStructGrid *grid,
HYPRE_Int part,
HYPRE_Int var,
hypre_Index pt_location,
HYPRE_Int collapse_dir,
HYPRE_Int new_stencil_dir,
HYPRE_Real **collapsed_vals_ptr)
{
HYPRE_Int ierr= 0;
HYPRE_Int matrix_type= HYPRE_PARCSR;
HYPRE_Int start_rank = hypre_ParCSRMatrixFirstRowIndex(Aee);
HYPRE_Int end_rank = hypre_ParCSRMatrixLastRowIndex(Aee);
hypre_BoxManEntry *entry;
HYPRE_Int *ranks;
HYPRE_Int *marker; /* marker to record the rank groups */
HYPRE_Int max_ranksize= 9;
HYPRE_Real *collapsed_vals;
hypre_Index index1, index2;
HYPRE_Int size, *col_inds, *col_inds2;
HYPRE_Real *values;
HYPRE_Int rank, row_rank, *swap_inds;
HYPRE_Int i, j, m, centre, found;
HYPRE_Int getrow_ierr;
HYPRE_Int cnt;
/* create the collapsed stencil coefficients. Three components. */
collapsed_vals= hypre_CTAlloc(HYPRE_Real, 3);
/* check if the row corresponding to pt_location is on this proc. If
not, return an identity row. THIS SHOULD BE CORRECTED IN THE FUTURE
TO GIVE SOMETHING MORE REASONABLE. */
hypre_SStructGridFindBoxManEntry(grid, part, pt_location, var, &entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, pt_location, &rank, matrix_type);
if (rank < start_rank || rank > end_rank)
{
collapsed_vals[1] = 1.0;
*collapsed_vals_ptr= collapsed_vals;
ierr= 1;
return ierr;
}
/* Extract the ranks of the collapsed stencil pattern. Since only like-var
collapsing, we assume that max stencil size is 9. This agrees with the
assumed pattern surrounding pt_location. Concatenating done. */
ranks = hypre_TAlloc(HYPRE_Int, max_ranksize);
marker= hypre_TAlloc(HYPRE_Int, max_ranksize);
cnt= 0;
centre= 0;
for (j= -1; j<= 1; j++)
{
hypre_CopyIndex(pt_location, index1);
index1[new_stencil_dir]+= j;
for (i= -1; i<= 1; i++)
{
hypre_CopyIndex(index1, index2);
index2[collapse_dir]+= i;
hypre_SStructGridFindBoxManEntry(grid, part, index2, var, &entry);
if (entry)
{
hypre_SStructBoxManEntryGetGlobalRank(entry, index2, &rank, matrix_type);
ranks[cnt] = rank;
marker[cnt]= j+1;
/* mark centre component- entry!=NULL always */
if ( (!i) && (!j) )
{
centre= cnt;
}
cnt++;
}
}
}
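/* At most 9 ranks are gathered (the 3x3 like-variable pattern around
pt_location); marker[] is 0, 1, or 2 according to the offset in the new
stencil direction, i.e., the collapsed bin (ldiag, diag, udiag) that the
rank's value accumulates into. */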
/* Grab the row corresponding to index pt_location. rank located in location
centre of ranks, i.e., rank for index2= pt_location. Mark location of values,
which will record the original location of values after the sorting. */
row_rank= ranks[centre];
getrow_ierr= HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) Aee, row_rank,
&size, &col_inds, &values);
if (getrow_ierr < 0)
hypre_printf("offproc collapsing problem\n");
swap_inds= hypre_TAlloc(HYPRE_Int, size);
col_inds2= hypre_TAlloc(HYPRE_Int, size);
for (i= 0; i< size; i++)
{
swap_inds[i]= i;
col_inds2[i]= col_inds[i];
}
/* qsort ranks & col_inds */
hypre_qsort2i(ranks, marker, 0, cnt-1);
hypre_qsort2i(col_inds2, swap_inds, 0, size-1);
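/* Both arrays are now sorted in ascending order, so a single linear merge
scan below matches each stencil rank against the row's column indices and
accumulates its value into the proper collapsed bin. */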
/* search for values to collapse */
m= 0;
for (i= 0; i< cnt; i++)
{
found= 0;
while (!found)
{
if (ranks[i] != col_inds2[m])
{
m++;
}
else
{
collapsed_vals[marker[i]]+= values[swap_inds[m]];
m++;
break; /* break out of while loop */
}
} /* while (!found) */
} /* for (i= 0; i< cnt; i++) */
HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) Aee, row_rank, &size,
&col_inds, &values);
hypre_TFree(col_inds2);
hypre_TFree(ranks);
hypre_TFree(marker);
hypre_TFree(swap_inds);
*collapsed_vals_ptr= collapsed_vals;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_TriDiagSolve: Direct tridiagonal solve
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_TriDiagSolve(HYPRE_Real *diag,
HYPRE_Real *upper,
HYPRE_Real *lower,
HYPRE_Real *rhs,
HYPRE_Int size)
{
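/* Thomas algorithm, O(size): forward elimination of the sub-diagonal
followed by back substitution. Indexing convention implied by the loops:
lower[] is used for i= 1..size-1 and upper[] for i= 0..size-2; rhs is
overwritten with the solution. */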
HYPRE_Int ierr= 0;
HYPRE_Int i, size1;
HYPRE_Real *copy_diag;
HYPRE_Real multiplier;
size1= size-1;
/* copy diag so that the matrix is not modified */
copy_diag= hypre_TAlloc(HYPRE_Real, size);
for (i= 0; i< size; i++)
{
copy_diag[i]= diag[i];
}
/* forward substitution */
for (i= 1; i< size; i++)
{
multiplier= -lower[i]/copy_diag[i-1];
copy_diag[i]+= multiplier*upper[i-1];
rhs[i] += multiplier*rhs[i-1];
}
/* backward substitution */
rhs[size1]/= copy_diag[size1];
for (i= size1-1; i>= 0; i--)
{
rhs[i]= (rhs[i] - upper[i]*rhs[i+1])/copy_diag[i];
}
hypre_TFree(copy_diag);
return ierr;
}
|
racf_fmt_plug.c | /* RACF cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Thanks to Nigel Pentland <nigel at nigelpentland.net>, author of CRACF for
* providing algorithm details.
*
* Thanks to Main Framed <mainframed767 at gmail.com> for providing test vectors,
* algorithm details and requesting the RACF cracker in the first place.
*
* racfdump format => userid:$racf$*userid*deshash
*/
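/* Scheme, as implemented below: the password is mapped to EBCDIC, each
* byte is XORed with 0x55 and shifted left one bit (with odd DES parity)
* to form a DES key; the userid, mapped to EBCDIC and padded to 8 bytes
* with EBCDIC spaces (0x40), is then DES-ECB encrypted under that key to
* produce the stored 8-byte hash.
*/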
#if FMT_EXTERNS_H
extern struct fmt_main fmt_racf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_racf);
#else
#include <openssl/des.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned K8-dual HT
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "RACF"
#define FORMAT_NAME ""
#define FORMAT_TAG "$racf$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 8
#define CIPHERTEXT_LENGTH 16
#define BINARY_SIZE 8
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const unsigned char a2e[256] = {
0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
64, 79,127,123, 91,108, 80,125, 77, 93, 92, 78,107, 96, 75, 97,
240,241,242,243,244,245,246,247,248,249,122, 94, 76,126,110,111,
124,193,194,195,196,197,198,199,200,201,209,210,211,212,213,214,
215,216,217,226,227,228,229,230,231,232,233, 74,224, 90, 95,109,
121,129,130,131,132,133,134,135,136,137,145,146,147,148,149,150,
151,152,153,162,163,164,165,166,167,168,169,192,106,208,161, 7,
32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62,225,
65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
88, 89, 98, 99,100,101,102,103,104,105,112,113,114,115,116,117,
118,119,120,128,138,139,140,141,142,143,144,154,155,156,157,158,
159,160,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,202,203,204,205,206,207,218,219,
220,221,222,223,234,235,236,237,238,239,250,251,252,253,254,255
};
/* This is a2e[] with each entry XOR 0x55, left-shifted one bit
and finally with odd parity so that DES_set_key_unchecked
can be used directly. This provides about 15% speed up. */
static const unsigned char a2e_precomputed[256] = {
171, 168, 174, 173, 196, 241, 247, 244, 134, 161, 224, 188, 179, 176, 182, 181,
138, 137, 143, 140, 211, 208, 206, 230, 155, 152, 213, 229, 146, 145, 151, 148,
42, 52, 84, 93, 28, 115, 11, 81, 49, 16, 19, 55, 124, 107, 61, 104,
74, 73, 79, 76, 67, 64, 70, 69, 91, 88, 94, 22, 50, 87, 118, 117,
82, 41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7,
4, 26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 62, 107, 31, 21, 112,
88, 168, 174, 173, 162, 161, 167, 164, 186, 185, 137, 143, 140, 131, 128, 134,
133, 155, 152, 239, 236, 227, 224, 230, 229, 251, 248, 42, 127, 11, 233, 164,
234, 233, 239, 236, 227, 128, 167, 133, 251, 248, 254, 253, 242, 185, 191, 157,
203, 200, 158, 205, 194, 193, 199, 186, 218, 217, 223, 220, 162, 131, 214, 104,
41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7, 4,
26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 74, 73, 79, 76, 67, 64,
70, 69, 91, 171, 191, 188, 179, 176, 182, 181, 138, 158, 157, 146, 145, 151,
148, 234, 254, 253, 242, 241, 247, 244, 203, 200, 206, 205, 194, 193, 199, 196,
218, 217, 223, 220, 211, 208, 214, 213, 62, 61, 50, 49, 55, 52, 31, 28,
19, 16, 22, 21, 127, 124, 115, 112, 118, 117, 94, 93, 82, 81, 87, 84
};
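/* Worked example for one entry: 'A' (0x41) has a2e[0x41]= 0xC1 (EBCDIC);
0xC1 ^ 0x55 = 0x94; (0x94 << 1) & 0xff = 0x28; 0x28 has an even number of
set bits, so odd parity sets the low bit, giving a2e_precomputed[0x41] =
0x29 (41 decimal, as in the table above). */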
/* in-place ascii2ebcdic conversion */
static void ascii2ebcdic(unsigned char *str)
{
int i;
int n = strlen((const char*)str);
for (i = 0; i < n; ++i)
str[i] = a2e[str[i]];
}
/* replace missing characters in userid by EBCDIC spaces (0x40) */
static void process_userid(unsigned char *str)
{
int i;
for (i = strlen((const char*)str); i < 8; ++i)
str[i] = 0x40;
str[8] = 0; /* terminate string */
}
#ifdef RACF_DEBUG
static void print_hex(unsigned char *str, int len)
{
int i;
for (i = 0; i < len; ++i)
printf("%02x", str[i]);
printf("\n");
}
#endif
static struct fmt_tests racf_tests[] = {
{"$racf$*AAAAAAA*CA2E330B2FD1820E", "AAAAAAAA"},
{"$racf$*AAAAAAAA*062314297C496E0E", "AAAAAAAA"},
{"$racf$*JJJJJJJJ*8B5F0B1D0826D927", "TESTTEST"},
{"$racf$*TTTTTTTT*424B258AF8B9061B", "TESTTEST"},
{"$racf$*A*0F7DE80335E8ED68", "A"},
{"$racf$*OPEN3*EC76FC0DEF5B0A83", "SYS1"},
{"$racf$*TESTTEST*0FF48804F759193F", "TESTTEST"},
{"$racf$*SYSOPR*83845F8EEC7C20D8", "SYSOPR"},
{"$racf$*TCPIP*657889CD0F5D40DF", "SYS1"},
{"$racf$*TESTER*E05AB770EA048421", "TEST"},
{NULL}
};
static struct custom_salt {
unsigned char userid[8 + 1];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static DES_key_schedule (*schedules);
static int dirty;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
schedules = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*schedules));
}
static void done(void)
{
MEM_FREE(schedules);
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
p = strtokm(ctcopy, "*"); /* username */
if (!p)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* hash */
goto err;
if (hexlenu(p, &extra) != CIPHERTEXT_LENGTH || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy, *username;
static struct custom_salt cs;
ctcopy += FORMAT_TAG_LEN; /* skip over "$racf$*" */
username = strtokm(ctcopy, "*");
/* process username */
strncpy((char*)cs.userid, username, 8);
cs.userid[8] = 0; // terminate username at 8 bytes
ascii2ebcdic(cs.userid);
process_userid(cs.userid);
#ifdef RACF_DEBUG
printf("userid in EBCDIC : ");
print_hex(cs.userid, 8);
#endif
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
if (dirty) {
DES_cblock des_key;
int i;
/* process key */
for (i = 0; saved_key[index][i]; i++)
des_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];
/* pad the key to 8 bytes with (EBCDIC space (0x40) XOR 0x55) << 1 */
while (i < 8)
des_key[i++] = 0x2a;
DES_set_key_unchecked(&des_key, &schedules[index]);
}
/* do encryption */
DES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, (DES_cblock*)crypt_out[index], &schedules[index], DES_ENCRYPT);
}
dirty = 0;
return count;
}
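/* Illustrative sketch (not part of the original format): one RACF hash
computed end-to-end for a single password/userid pair, mirroring what
crypt_all does per index. Kept out of the build; racf_hash_one is a
hypothetical helper name. */
#if 0
static void racf_hash_one(const char *password, const char *userid,
unsigned char out[8])
{
DES_cblock des_key = { 0 };
DES_key_schedule schedule;
unsigned char uid[8 + 1];
int i;
/* password bytes: ASCII -> EBCDIC, XOR 0x55, << 1, odd parity */
for (i = 0; i < 8 && password[i]; i++)
des_key[i] = a2e_precomputed[ARCH_INDEX(password[i])];
while (i < 8)
des_key[i++] = 0x2a; /* transformed EBCDIC space */
/* userid: EBCDIC, padded to 8 bytes with EBCDIC spaces */
strncpy((char*)uid, userid, 8);
uid[8] = 0;
ascii2ebcdic(uid);
process_userid(uid);
DES_set_key_unchecked(&des_key, &schedule);
DES_ecb_encrypt((const_DES_cblock*)uid, (DES_cblock*)out,
&schedule, DES_ENCRYPT);
}
#endif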
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void racf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > 8)
saved_len = 8;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
dirty = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_racf = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ FORMAT_TAG },
racf_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
racf_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2020 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifndef OWL_CORE_CONV_IMPL
#define OWL_CORE_CONV_IMPL
/*
* Calculate the block sizes for convolution operations.
* Code heavily inspired by Eigen (http://eigen.tuxfamily.org/).
*/
#define IM2COL_THRESHOLD (512 * 1024)
#define ALIGN_SIZE 32 // for AVX address alignment
// Whether computing block sizes from the cache sizes actually pays off here
// is unverified, since we call OpenBLAS GEMM directly; also note that we
// compute `InputMatrix x KernelMatrix`, not the other way around.
void compute_block_sizes(int* kp, int* mp, int* np, int typesize) {
int l1, l2, l3;
query_cache_sizes(&l1, &l2, &l3);
// (when debugging, override these cache sizes with small numbers)
int k = *kp;
int m = *mp;
int n = *np;
if (fmaxf(k, fmaxf(m, n)) < 50) {
return;
}
int nr = 4;
int num_reg = 16;
int mr = num_reg / (2 * nr) * typesize;
int k_strip = 8;
int k_div = (mr + nr) * typesize;
int k_sub = mr * nr * typesize;
const int max_kc = fmaxf(((l1 - k_sub) / k_div) & (~(k_strip - 1)), 1);
const int old_k = k;
if (k > max_kc) {
k = (k % max_kc) == 0 ? max_kc
: max_kc - k_strip * ((max_kc - 1 - (k % max_kc)) / (k_strip * (k / max_kc + 1)));
//assert (old_k / k == old_k / max_kc);
}
int max_nc;
const int actual_l2 = 1572864; // assume a 1.5 MB L2 working set; substitute l3 when debugging
const int lhs_bytes = m * k * typesize;
const int rest_l1 = l1 - k_sub - lhs_bytes;
if (rest_l1 >= nr * k * typesize) {
max_nc = rest_l1 / (k * typesize);
} else {
max_nc = (3 * actual_l2) / (4 * max_kc * typesize);
}
int nc = (int) (fminf(actual_l2 / (2 * k * typesize), max_nc)) & (~(nr - 1));
if (n > nc) {
n = (n % nc == 0) ? nc : (nc - nr * ((nc - (n % nc)) / (nr * (n / nc + 1))));
} else if (old_k == k) {
int kn_size = k * n * typesize;
int actual_lm = actual_l2;
int max_mc = m;
if (kn_size < 1024) {
actual_lm = l1;
} else if (l3 != 0 && kn_size <= 32768) {
actual_lm = l2;
max_mc = fminf(576, max_mc);
}
int mc = fminf(actual_lm / (3 * k * typesize), max_mc);
if (mc > mr) {
mc -= mc % mr;
}
else if (mc == 0) {
*kp = k; *mp = m; *np = n;
return;
}
m = (m % mc == 0) ? mc : (mc - mr * ((mc - (m % mc)) / (mr * (m / mc + 1))));
}
*kp = k; *mp = m; *np = n;
return;
}
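/* Illustrative usage (not in the original source): the three extents are
shrunk in place to cache-friendly block sizes, and callers then step
their GEMM loops by the returned values. The figures are hypothetical. */
#if 0
static void block_size_usage_sketch(void)
{
int kc = 3 * 3 * 3; /* k: kernel_cols * kernel_rows * in_channel */
int mc = 128 * 28 * 28; /* m: batches * output_rows * output_cols */
int nc = 64; /* n: out_channel */
compute_block_sizes(&kc, &mc, &nc, sizeof(float));
/* kc, mc, nc now bound the k-, m- and n-block extents */
}
#endif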
#endif /* OWL_CORE_CONV_IMPL */
#ifdef OWL_ENABLE_TEMPLATE
#ifdef AVX_PSIZE
/*
* Fill in temporary input matrix from input tensor with vectorisation.
* Currently only support AVX instruction set.
*/
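/* Index note (added for clarity): a flattened kernel-matrix column index
q = k + ik decomposes as q = kc * kernel_ri + kr * in_channel + ki,
i.e. (kernel column, kernel row, input channel); adding (cstart, rstart)
then locates the element inside the padded input window. */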
void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k,
int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart,
int rstart, int input_cols, int input_rows, short reverse_mode
) {
// assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input
// matrix can always be loaded consecutively by a step of AVX_PSIZE
for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) {
int kc = (k + ik) / kernel_ri;
int kri = (k + ik) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_base + input_col * input_ri
+ input_row * in_channel + ki;
if (reverse_mode == 0) {
AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
AVX_STOREA(output_ptr + (*cmk_ptr), v);
}
else {
AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr));
AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
AVX_TYPE v = AVX_ADD(v1, v2);
AVX_STOREU(input_ptr + input_index, v);
}
}
*cmk_ptr += AVX_PSIZE;
}
return;
}
void ACX_FUN_LOAD (load_sub_matrix, spatial) (
TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc,
int k, int kernel_ri, int input_ri, int in_channel, int idx_base,
int cstart, int rstart, int input_cols, int input_rows,
int kernel_rows, short reverse_mode
){
int ik = 0;
// first, load `kc_strip` numbers with a step of AVX_PSIZE;
// assume `kc_strip % AVX_PSIZE == 0`
for ( ; ik < kc_strip; ik += AVX_PSIZE) {
const int cr_set[2] = {(k + ik) / in_channel,
(k + ik + AVX_PSIZE - 1) / in_channel};
const int c_set[2] = {cr_set[0] / kernel_rows,
cr_set[1] / kernel_rows};
const int cols[2] = {cstart + c_set[0], cstart + c_set[1]};
// out of bounds; set the next AVX_PSIZE numbers to 0
if (cols[0] >= input_cols || cols[1] < 0) {
*cmk_ptr += AVX_PSIZE;
continue;
}
else if (cols[0] == cols[1]) {
const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows,
cr_set[1] - c_set[1] * kernel_rows};
const int rows[2] = {rstart + r_set[0], rstart + r_set[1]};
// out of bounds; set the next AVX_PSIZE numbers to 0
if (rows[0] >= input_rows || rows[1] < 0) {
*cmk_ptr += AVX_PSIZE;
continue;
}
// next AVX_PSIZE numbers can be loaded consecutively
else if (rows[0] >= 0 && rows[1] < input_rows) {
int ki = k + ik - cr_set[0] * in_channel;
int input_index = idx_base + cols[0] * input_ri
+ rows[0] * in_channel + ki;
if (reverse_mode == 0) {
AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
AVX_STOREU(output_ptr + (*cmk_ptr), v);
}
else {
AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr));
AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
AVX_TYPE v = AVX_ADD(v1, v2);
AVX_STOREU(input_ptr + input_index, v);
}
*cmk_ptr += AVX_PSIZE;
continue;
}
}
// previous special cases do not apply; calculate input index one by one
for (int ip = 0; ip < AVX_PSIZE; ip++) {
int kc = (k + ik + ip) / kernel_ri;
int kri = (k + ik + ip) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_base + input_col * input_ri
+ input_row * in_channel + ki;
if (reverse_mode == 0)
output_ptr[*cmk_ptr] = input_ptr[input_index];
else
input_ptr[input_index] += output_ptr[*cmk_ptr];
}
*cmk_ptr += 1;
}
}
// second, load the rest `actual_kc - kc_strip` numbers
for (; ik < actual_kc; ik++) {
int kc = (k + ik) / kernel_ri;
int kri = (k + ik) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_base + input_col * input_ri
+ input_row * in_channel + ki;
if (reverse_mode == 0)
output_ptr[*cmk_ptr] = input_ptr[input_index];
else
input_ptr[input_index] += output_ptr[*cmk_ptr];
}
*cmk_ptr += 1;
}
return;
}
#endif /* AVX_PSIZE */
/*
* GEBP-based implementation. See Goto et al. [2008] for details; an
* illustrative sketch of the blocking loop nest follows.
*/
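/* Illustrative sketch (not part of the original source): the three-level
blocking used below, reduced to a plain blocked GEMM. TYPE, GEMM and
ALPHA are the template macros used throughout this file;
blocked_gemm_sketch is a hypothetical helper kept out of the build. */
#if 0
static void blocked_gemm_sketch(const TYPE *a, const TYPE *b, TYPE *c,
int mt, int nt, int kt)
{
int kc = kt, mc = mt, nc = nt;
compute_block_sizes(&kc, &mc, &nc, sizeof(TYPE));
for (int m = 0; m < mt; m += mc) {
int actual_mc = m + mc < mt ? mc : mt - m;
for (int k = 0; k < kt; k += kc) {
int actual_kc = k + kc < kt ? kc : kt - k;
for (int n = 0; n < nt; n += nc) {
int actual_nc = n + nc < nt ? nc : nt - n;
/* C[m.., n..] += A[m.., k..] * B[k.., n..];
beta = 1 so partial products accumulate across k blocks */
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
actual_mc, actual_nc, actual_kc, ALPHA,
a + (size_t)m * kt + k, kt,
b + (size_t)k * nt + n, nt,
1, c + (size_t)m * nt + n, nt);
}
}
}
}
#endif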
CAMLprim value FUN_NATIVE (spatial) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_cri = out_channel * output_rows * output_cols;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_cr = kernel_cols * kernel_rows;
const int kernel_ri = kernel_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
// if generated input matrix is small enough, use im2col implementation
int mat_size = kernel_cri * output_crb;
if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_crb, out_channel, kernel_cri, ALPHA,
inpt2d, kernel_cri, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
int mc = output_crb;
int kc = kernel_cri;
int nc = out_channel;
compute_block_sizes(&kc, &mc, &nc, sizeof(TYPE)); // argument order follows the (kp, mp, np) signature
#if defined(AVX_PSIZE) && defined(_WIN32)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = NULL;
if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
exit(1);
#else
TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
if (temp_mk == NULL) exit(1);
#endif
TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
if (temp_kn == NULL) exit(1);
TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
if (temp_mn == NULL) exit(1);
for (int m = 0; m < output_crb; m += mc) {
int actual_mc = fminf(m + mc, output_crb) - m;
for (int k = 0; k < kernel_cri; k += kc) {
memset(temp_mk, 0, mc * kc * sizeof(TYPE));
int actual_kc = fminf(k + kc, kernel_cri) - k;
#ifdef AVX_PSIZE
int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
// iterate along the rows of the generated input matrix; several rows
// could be processed in parallel with e.g. OpenMP
int cmk = 0;
for (int im = 0; im < actual_mc; im += 1) {
int b = (m + im) / output_cr;
int cr = (m + im) - b * output_cr;
int c = cr / output_rows;
int r = cr - c * output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int idx_base = b * input_cri;
// fill in the sub input matrix
#ifdef AVX_PSIZE
if (fast_flag) {
ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0);
}
else {
ACX_FUN_LOAD (load_sub_matrix, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
k, kernel_ri, input_ri, in_channel, idx_base,
cstart, rstart, input_cols, input_rows, kernel_rows, 0);
}
#else
for (int ik = 0; ik < actual_kc; ik += 1) {
int kc = (k + ik) / kernel_ri;
int kri = (k + ik) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_base + input_col * input_ri
+ input_row * in_channel + ki;
temp_mk[cmk] = input_ptr[input_index];
}
cmk++;
}
#endif
}
int idx_kn_base = k * out_channel;
for (int n = 0; n < out_channel; n += nc) {
int actual_nc = fminf(n + nc, out_channel) - n;
const int idx_kn = idx_kn_base + n; // fresh offset per n-block; accumulating with += n drifts after the second block
// fill in the kernel matrix
int cnk = 0;
for (int ik = 0; ik < actual_kc; ik++) {
for (int jn = 0; jn < actual_nc; jn++) {
int index_kn = idx_kn + ik * out_channel + jn;
temp_kn[cnk++] = kernel_ptr[index_kn];
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
actual_mc, actual_nc, actual_kc, ALPHA,
temp_mk, actual_kc, temp_kn, actual_nc,
BETA, temp_mn, actual_nc);
int cmn = 0;
for (int ix = 0; ix < actual_mc; ix++) {
for (int iy = 0; iy < actual_nc; iy++) {
int index_mn = (ix + m) * out_channel + (iy + n);
output_ptr[index_mn] += temp_mn[cmn++];
}
}
}
}
}
free(temp_mk);
free(temp_kn);
free(temp_mn);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
return FUN_NATIVE (spatial) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
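/* Note (added for clarity): OCaml externals with more than five arguments
need both a native stub and a bytecode stub; the bytecode runtime packs
the arguments into an array, which FUN_BYTE unpacks before delegating
to FUN_NATIVE. The same pairing applies to every stub below. */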
CAMLprim value FUN_NATIVE (spatial_backward_input) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_ri = kernel_rows * in_channel;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
int mat_size = kernel_cri * output_crb;
if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_crb, kernel_cri, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_cri);
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
}
++cnt;
}
}
}
}
free(inpt2d);
return Val_unit;
}
int mc = output_crb;
int kc = kernel_cri;
int nc = out_channel;
compute_block_sizes(&kc, &mc, &nc, sizeof(TYPE)); // argument order follows the (kp, mp, np) signature
#if defined(AVX_PSIZE) && defined(_WIN32)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = NULL;
if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
exit(1);
#else
TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
if (temp_mk == NULL) exit(1);
#endif
TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
if (temp_kn == NULL) exit(1);
TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
if (temp_mn == NULL) exit(1);
for (int m = 0; m < output_crb; m += mc) {
int actual_mc = fminf(m + mc, output_crb) - m;
int idx_mn_base = m * out_channel;
for (int k = 0; k < kernel_cri; k += kc) {
int actual_kc = fminf(k + kc, kernel_cri) - k;
int idx_kn_base = k * out_channel;
#ifdef AVX_PSIZE
int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
for (int n = 0; n < out_channel; n += nc) {
int actual_nc = fminf(n + nc, out_channel) - n;
// fresh offsets per n-block; accumulating with += n drifts after the second block
const int idx_kn = idx_kn_base + n;
const int idx_mn = idx_mn_base + n;
int cnk = 0;
for (int ik = 0; ik < actual_kc; ik++) {
for (int jn = 0; jn < actual_nc; jn++) {
int index_kn = idx_kn + ik * out_channel + jn;
temp_kn[cnk++] = kernel_ptr[index_kn];
}
}
int cmn = 0;
for (int ix = 0; ix < actual_mc; ix++) {
for (int iy = 0; iy < actual_nc; iy++) {
int index_mn = idx_mn + ix * out_channel + iy;
temp_mn[cmn++] = output_ptr[index_mn];
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
actual_mc, actual_kc, actual_nc, ALPHA,
temp_mn, actual_nc, temp_kn, actual_nc,
BETA, temp_mk, actual_kc);
int cmk = 0;
for (int im = 0; im < actual_mc; im += 1) {
int b = (m + im) / output_cr;
int cr = (m + im) - b * output_cr;
int c = cr / output_rows;
int r = cr - c * output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
if (fast_flag) {
ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1);
}
else {
ACX_FUN_LOAD (load_sub_matrix, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
k, kernel_ri, input_ri, in_channel, idx_mk_base,
cstart, rstart, input_cols, input_rows, kernel_rows, 1);
}
#else
for (int ik = 0; ik < actual_kc; ik += 1) {
int kc = (k + ik) / kernel_ri;
int kri = (k + ik) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_mk_base + input_col * input_ri
+ input_row * in_channel + ki;
input_ptr[input_index] += temp_mk[cmk];
}
cmk++;
}
#endif
}
}
}
}
free(temp_mk);
free(temp_kn);
free(temp_mn);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
CAMLprim value FUN_NATIVE (spatial_backward_kernel) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_ri = kernel_rows * in_channel;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
INIT;
int mat_size = kernel_cri * output_crb;
if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, output_crb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_cri,
BETA, kern2d, kernel_cri);
int cnt = 0;
for (int j = 0; j < kernel_cri; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
int mc = output_crb;
int kc = kernel_cri;
int nc = out_channel;
compute_block_sizes(&kc, &mc, &nc, sizeof(TYPE)); // argument order follows the (kp, mp, np) signature
#if defined(AVX_PSIZE) && defined(_WIN32)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
int fast_flag = (in_channel % AVX_PSIZE == 0);
TYPE *temp_mk = NULL;
if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
exit(1);
#else
TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
if (temp_mk == NULL) exit(1);
#endif
TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
if (temp_kn == NULL) exit(1);
TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
if (temp_mn == NULL) exit(1);
for (int m = 0; m < output_crb; m += mc) {
int actual_mc = fminf(m + mc, output_crb) - m;
int idx_mn_base = m * out_channel;
for (int k = 0; k < kernel_cri; k += kc) {
int actual_kc = fminf(k + kc, kernel_cri) - k;
int idx_kn_base = k * out_channel;
memset(temp_mk, 0, mc * kc * sizeof(TYPE));
#ifdef AVX_PSIZE
int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
int cmk = 0;
for (int im = 0; im < actual_mc; im += 1) {
int b = (m + im) / output_cr;
int cr = (m + im) - b * output_cr;
int c = cr / output_rows;
int r = cr - c * output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
if (fast_flag) {
ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0);
}
else {
ACX_FUN_LOAD (load_sub_matrix, spatial) (
input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
k, kernel_ri, input_ri, in_channel, idx_mk_base,
cstart, rstart, input_cols, input_rows, kernel_rows, 0);
}
#else
for (int ik = 0; ik < actual_kc; ik += 1) {
int kc = (k + ik) / kernel_ri;
int kri = (k + ik) - kc * kernel_ri;
int kr = kri / in_channel;
int ki = kri - kr * in_channel;
int input_col = kc + cstart;
int input_row = kr + rstart;
if (input_col < input_cols && input_col >= 0 &&
input_row < input_rows && input_row >= 0) {
int input_index = idx_mk_base + input_col * input_ri
+ input_row * in_channel + ki;
temp_mk[cmk] = input_ptr[input_index];
}
cmk++;
}
#endif
}
for (int n = 0; n < out_channel; n += nc) {
int actual_nc = fminf(n + nc, out_channel) - n;
// fresh offsets per n-block; accumulating with += n drifts after the second block
const int idx_mn = idx_mn_base + n;
const int idx_kn = idx_kn_base + n;
int cmn = 0;
for (int ix = 0; ix < actual_mc; ix++) {
for (int iy = 0; iy < actual_nc; iy++) {
int index_mn = idx_mn + ix * out_channel + iy;
temp_mn[cmn++] = output_ptr[index_mn];
}
}
memset(temp_kn, 0, nc * kc * sizeof(TYPE));
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
actual_nc, actual_kc, actual_mc, ALPHA,
temp_mn, actual_nc, temp_mk, actual_kc,
BETA, temp_kn, actual_kc);
int cnk = 0;
for (int jn = 0; jn < actual_nc; jn++) {
for (int ik = 0; ik < actual_kc; ik++) {
int index_kn = idx_kn + ik * out_channel + jn;
kernel_ptr[index_kn] += temp_kn[cnk++];
}
}
}
}
}
free(temp_mk);
free(temp_kn);
free(temp_mn);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_kernel) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/*
* im2col implementation
*/
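/* Layout note (added for clarity): inpt2d has one row per
(batch, output column, output row) triple and one column per
(kernel column, kernel row, input channel) triple, so a single GEMM
against the (kernel_cri x out_channel) kernel matrix produces every
output at once, at the cost of materialising the full
kernel_cri * output_crb buffer. */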
CAMLprim value FUN_NATIVE (spatial_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_cri = out_channel * output_rows * output_cols;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_crb, out_channel, kernel_cri, ALPHA,
inpt2d, kernel_cri, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, output_crb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_cri,
BETA, kern2d, kernel_cri);
int cnt = 0;
for (int j = 0; j < kernel_cri; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_crb, kernel_cri, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_cri);
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
}
++cnt;
}
}
}
}
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
CAMLprim value FUN_NATIVE (cuboid_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
INIT;
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_drcb, out_channel, kernel_idrc, ALPHA,
inpt2d, kernel_idrc, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
return FUN_NATIVE (cuboid_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18]
);
}
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_idrc, output_drcb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_idrc,
BETA, kern2d, kernel_idrc);
int cnt = 0;
for (int j = 0; j < kernel_idrc; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
INIT;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_drcb, kernel_idrc, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_idrc);
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
}
++cnt;
}
}
}
}
}
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_input_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
/*
* memory-efficient implementation
*/
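/* Scheme note (added for clarity): instead of materialising the full
im2col matrix, this version lowers the input into a buffer of shape
(batches * output_cols) x (padded_input_rows * kernel_cols * in_channel)
and runs one GEMM per output row over a window shifted by inpt2d_step,
which appears to follow the MEC approach of Cho & Brand (2017). For
unit row stride this shrinks the buffer by roughly a factor of
kernel_rows relative to plain im2col. */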
CAMLprim value FUN_NATIVE (spatial_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = input_rows * in_channel;
const int output_cri = out_channel * output_rows * output_cols;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(batches * output_cri, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
int cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
}
counter++;
}
}
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
inpt2d_rows, out_channel, kernel_cri, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
BETA, output2d + output_bco * i, inpt2d_rows);
}
cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_rows * out_channel; ++i) {
output_ptr[cnt++] = output2d[i * inpt2d_rows + j];
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
return FUN_NATIVE (spatial_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_ro = output_rows * out_channel;
const int output_crb = output_rows * output_cols * batches;
const int kernel_io = in_channel * out_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
INIT;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx =
bt * input_cri + b * input_ri + a * in_channel + h;
inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
}
counter++;
}
}
}
}
int cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_ro; ++i) {
output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, inpt2d_rows, ALPHA,
output2d + output_bco * i, inpt2d_rows,
inpt2d + inpt2d_step * i, inpt2d_rows,
ALPHA, kern2d, out_channel);
}
cnt = 0;
int kidx = 0;
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
for (int o = 0; o < out_channel; ++o) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kernel_ptr[kidx] = kern2d[cnt++];
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
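/*
 * Gradient of the 2D convolution w.r.t. the input: the output gradient
 * and the kernel are packed into 2D matrices, one GEMM per output row
 * accumulates the lowered input gradient in inpt2d (beta = ALPHA), and
 * the final loop scatter-adds each lowered window back into input_ptr;
 * windows overlap, hence the +=.
 */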
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_ro = output_rows * out_channel;
const int output_crb = output_rows * output_cols * batches;
const int kernel_io = in_channel * out_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
int cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_ro; ++i) {
output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
}
}
cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
inpt2d_rows, kernel_cri, out_channel, ALPHA,
output2d + output_bco * i, inpt2d_rows,
kern2d, kernel_cri, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows);
}
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
const int input_idx_base = bt * input_cri;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i];
}
counter++;
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
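/*
 * 3D (cuboid) convolution forward pass with the same row-sliding
 * lowering as spatial_mec, extended by a depth dimension: each inpt2d
 * row corresponds to a (batch, output column, output depth) triple and
 * the per-output-row GEMM slides over the padded input rows.
 */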
CAMLprim value FUN_NATIVE (cuboid_mec) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
const int kernel_dio = kernel_dpts * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bcdo = out_channel * output_cols * output_dpts * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
const int inpt2d_rows = batches * output_cols * output_dpts;
const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
INIT;
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE));
int cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int d = 0; d < kernel_dpts; ++d) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rdio + r * kernel_dio +
d * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
}
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / (output_cols * output_dpts);
int cd = i % (output_cols * output_dpts);
int ct = cd / output_dpts;
int dt = cd % output_dpts;
const int cstart = ct * col_stride - pc;
const int dstart = dt * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int r = rstart; r < rend; ++r) {
for (int c = cstart; c < cend; ++c) {
for (int d = dstart; d < dend; ++d) {
for (int h = 0; h < in_channel; ++h) {
if (c >= 0 && c < input_cols &&
r >= 0 && r < input_rows &&
d >= 0 && d < input_dpts) {
int input_idx = input_idx_base + c * input_rdi +
r * input_di + d * in_channel + h;
inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
inpt2d_rows, out_channel, kernel_idrc, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
BETA, output2d + output_bcdo * i, inpt2d_rows);
}
cnt = 0;
int oidx = 0;
for (int r = 0; r < output_rows; ++r) {
for (int o = 0; o < out_channel; ++o) {
for (int b = 0; b < batches; ++b) {
for (int c = 0; c < output_cols; ++c) {
for (int d = 0; d < output_dpts; ++d) {
oidx = b * output_crdo + c * output_rdo +
r * output_dpts * out_channel + d * out_channel + o;
output_ptr[oidx] = output2d[cnt++];
}
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
return FUN_NATIVE (cuboid_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18]
);
}
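/*
 * Kernel gradient of the cuboid convolution; mirrors
 * spatial_backward_kernel_mec with the extra depth dimension.
 */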
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
const int kernel_dio = kernel_dpts * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bcdo = out_channel * output_cols * output_dpts * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
const int inpt2d_rows = batches * output_cols * output_dpts;
const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
INIT;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
int cnt;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / (output_cols * output_dpts);
int cd = i % (output_cols * output_dpts);
int ct = cd / output_dpts;
int dt = cd % output_dpts;
const int cstart = ct * col_stride - pc;
const int dstart = dt * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
cnt = 0;
for (int r = rstart; r < rend; ++r) {
for (int c = cstart; c < cend; ++c) {
for (int d = dstart; d < dend; ++d) {
for (int h = 0; h < in_channel; ++h) {
if (c >= 0 && c < input_cols &&
r >= 0 && r < input_rows &&
d >= 0 && d < input_dpts) {
int input_idx = input_idx_base + c * input_rdi +
r * input_di + d * in_channel + h;
inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
cnt = 0;
int oidx = 0;
for (int r = 0; r < output_rows; ++r) {
for (int o = 0; o < out_channel; ++o) {
for (int b = 0; b < batches; ++b) {
for (int c = 0; c < output_cols; ++c) {
for (int d = 0; d < output_dpts; ++d) {
oidx = b * output_crdo + c * output_rdo +
r * output_dpts * out_channel + d * out_channel + o;
output2d[cnt++] = output_ptr[oidx];
}
}
}
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_idrc, inpt2d_rows, ALPHA,
output2d + output_bcdo * i, inpt2d_rows,
inpt2d + inpt2d_step * i, inpt2d_rows,
ALPHA, kern2d, out_channel);
}
cnt = 0;
int kidx = 0;
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int d = 0; d < kernel_dpts; ++d) {
for (int i = 0; i < in_channel; ++i) {
for (int o = 0; o < out_channel; ++o) {
kidx = c * kernel_rdio + r * kernel_dio +
d * kernel_io + i * out_channel + o;
kernel_ptr[kidx] = kern2d[cnt++];
}
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_kernel_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
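/*
 * Input gradient of the cuboid convolution; mirrors
 * spatial_backward_input_mec with the extra depth dimension.
 */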
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
const int kernel_dio = kernel_dpts * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bcdo = out_channel * output_cols * output_dpts * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
const int inpt2d_rows = batches * output_cols * output_dpts;
const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
INIT;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
int cnt = 0;
int oidx = 0;
for (int r = 0; r < output_rows; ++r) {
for (int o = 0; o < out_channel; ++o) {
for (int b = 0; b < batches; ++b) {
for (int c = 0; c < output_cols; ++c) {
for (int d = 0; d < output_dpts; ++d) {
oidx = b * output_crdo + c * output_rdo +
r * output_dpts * out_channel + d * out_channel + o;
output2d[cnt++] = output_ptr[oidx];
}
}
}
}
}
cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int d = 0; d < kernel_dpts; ++d) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rdio + r * kernel_dio +
d * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
}
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
inpt2d_rows, kernel_idrc, out_channel, ALPHA,
output2d + output_bcdo * i, inpt2d_rows,
kern2d, kernel_idrc, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows);
}
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / (output_cols * output_dpts);
int cd = i % (output_cols * output_dpts);
int ct = cd / output_dpts;
int dt = cd % output_dpts;
const int cstart = ct * col_stride - pc;
const int dstart = dt * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int r = rstart; r < rend; ++r) {
for (int c = cstart; c < cend; ++c) {
for (int d = dstart; d < dend; ++d) {
for (int h = 0; h < in_channel; ++h) {
if (c >= 0 && c < input_cols &&
r >= 0 && r < input_rows &&
d >= 0 && d < input_dpts) {
int input_idx = input_idx_base + c * input_rdi +
r * input_di + d * in_channel + h;
input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
}
++cnt;
}
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_input_mec) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
/*
 * naive implementation: direct nested-loop convolution kept as a
 * straightforward reference; no lowering buffers and no BLAS calls.
 */
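/* Computed quantity, in the index order used below (a over kernel
 * columns, b over kernel rows):
 *   output[i][j][k][l] =
 *     sum over h, a, b of input[i][a][b][h] * kernel[a-cstart][b-rstart][h][l]
 * where (a, b) ranges over the window anchored at (cstart, rstart) and
 * out-of-range taps contribute zero (implicit zero padding). */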
CAMLprim value FUN_NATIVE (spatial_naive) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_cri = out_channel * output_rows * output_cols;
const int output_cr = output_rows * output_cols;
const int output_ri = out_channel * output_rows;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int kernel_io = out_channel * in_channel;
const int ksize = kernel_cols * kernel_rows;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < out_channel; ++l) {
TYPE sum = 0.;
for (int h = 0; h < in_channel; ++h) {
TYPE input_val, kernel_val;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
input_val = *(input_ptr + input_idx);
} else {
input_val = 0.;
}
int kernel_index =
(a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
kernel_val = *(kernel_ptr + kernel_index);
sum += input_val * kernel_val;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = sum;
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
return FUN_NATIVE (spatial_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
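/*
 * Kernel gradient, naive version: for every output element, the
 * corresponding input window is multiplied by that output gradient and
 * accumulated directly into the zeroed kernel buffer.
 */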
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int kernel_io = out_channel * in_channel;
const int output_cri = out_channel * output_rows * output_cols;
const int output_ri = out_channel * output_rows;
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
INIT;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
for (int i = 0; i < batches; ++i) {
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < out_channel; ++l) {
int output_idx =
i * output_cri + j * output_ri + k * out_channel + l;
TYPE output_val = *(output_ptr + output_idx);
for (int h = 0; h < in_channel; ++h) {
TYPE input_val = 0.;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
i * input_cri + a * input_ri + b * in_channel + h;
input_val = *(input_ptr + input_idx);
} else {
input_val = 0.;
}
int kernel_index =
(a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
*(kernel_ptr + kernel_index) += output_val * input_val;
}
}
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
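/*
 * Input gradient, naive version: every output gradient element is
 * multiplied by the kernel taps and scatter-added into the zeroed input
 * buffer; out-of-range positions are skipped.
 */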
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int kernel_io = out_channel * in_channel;
const int output_cri = out_channel * output_rows * output_cols;
const int output_ri = out_channel * output_rows;
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
for (int i = 0; i < batches; ++i) {
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < out_channel; ++l) {
int output_idx =
i * output_cri + j * output_ri + k * out_channel + l;
TYPE output_val = *(output_ptr + output_idx);
for (int h = 0; h < in_channel; ++h) {
TYPE kernel_val = 0.;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
int kernel_index =
(a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
kernel_val = *(kernel_ptr + kernel_index);
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
i * input_cri + a * input_ri + b * in_channel + h;
*(input_ptr + input_idx) += output_val * kernel_val;
}
}
}
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
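/*
 * 3D (cuboid) direct convolution; same structure as spatial_naive with
 * an extra depth loop and, when padding != 1, SAME-style padding
 * offsets computed from the stride/kernel/input sizes.
 */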
CAMLprim value FUN_NATIVE (cuboid_naive) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int kernel_dio = out_channel * in_channel * kernel_dpts;
const int kernel_io = out_channel * in_channel;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_do = out_channel * output_dpts;
INIT;
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base =
i * output_crdo +
j * output_rdo +
k * output_do +
d * out_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < out_channel; ++l) {
TYPE sum = 0.;
int output_idx = output_idx_base + l;
for (int h = 0; h < in_channel; ++h) {
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
TYPE input_val, kernel_val;
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
input_val = *(input_ptr + input_idx);
} else {
input_val = 0.;
}
int kernel_index =
(a - cstart) * kernel_rdio +
(b - rstart) * kernel_dio +
(c - dstart) * kernel_io +
h * out_channel + l;
kernel_val = *(kernel_ptr + kernel_index);
sum += input_val * kernel_val;
}
}
}
}
*(output_ptr + output_idx) = sum;
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
return FUN_NATIVE (cuboid_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18]
);
}
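/*
 * Kernel gradient of the cuboid convolution, naive version.
 */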
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int kernel_dio = out_channel * in_channel * kernel_dpts;
const int kernel_io = out_channel * in_channel;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_do = out_channel * output_dpts;
memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
INIT;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base =
i * output_crdo +
j * output_rdo +
k * output_do +
d * out_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < out_channel; ++l) {
int output_idx = output_idx_base + l;
TYPE output_val = *(output_ptr + output_idx);
for (int h = 0; h < in_channel; ++h) {
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
TYPE input_val = 0.;
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
input_val = *(input_ptr + input_idx);
}
int kernel_index =
(a - cstart) * kernel_rdio +
(b - rstart) * kernel_dio +
(c - dstart) * kernel_io +
h * out_channel + l;
*(kernel_ptr + kernel_index) += output_val * input_val;
}
}
}
}
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_kernel_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
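/*
 * Input gradient of the cuboid convolution, naive version.
 */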
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int kernel_dio = out_channel * in_channel * kernel_dpts;
const int kernel_io = out_channel * in_channel;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_do = out_channel * output_dpts;
memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
INIT;
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
for (int j = 0; j < output_cols; ++j) {
for (int k = 0; k < output_rows; ++k) {
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base =
i * output_crdo +
j * output_rdo +
k * output_do +
d * out_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < out_channel; ++l) {
int output_idx = output_idx_base + l;
TYPE output_val = *(output_ptr + output_idx);
for (int h = 0; h < in_channel; ++h) {
TYPE kernel_val;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
int kernel_index =
(a - cstart) * kernel_rdio +
(b - rstart) * kernel_dio +
(c - dstart) * kernel_io +
h * out_channel + l;
kernel_val = *(kernel_ptr + kernel_index);
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
*(input_ptr + input_idx) += output_val * kernel_val;
}
}
}
}
}
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward_input_naive) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
/*
 * dilated convolution: standard im2col lowering, except each kernel tap
 * samples the input with step (row_in_stride, col_in_stride), so a
 * kernel of size k covers an effective window of
 * k + (k - 1) * (in_stride - 1) input positions. A single GEMM over the
 * whole lowered matrix then computes the entire batch at once.
 */
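/* Worked example (illustrative numbers): kernel_cols = 3 with
 * col_in_stride = 2 gives kernel_cols_up = 3 + (3 - 1) * (2 - 1) = 5,
 * i.e. the three taps land on columns {0, 2, 4} of a 5-wide window. */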
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_cri = out_channel * output_rows * output_cols;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_crb, out_channel, kernel_cri, ALPHA,
inpt2d, kernel_cri, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_spatial_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
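/*
 * Kernel gradient of the dilated 2D convolution: the input is lowered
 * with the dilated sampling pattern, a single transposed GEMM forms the
 * (out_channel x kernel_cri) gradient in kern2d, and the final loop
 * transposes it back into the kernel layout.
 */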
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
int p_top = pad_rows / 2;
int p_left = pad_cols / 2;
if (p_top < 0) p_top = 0;
if (p_left < 0) p_left = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - p_left;
const int rstart = r * row_stride - p_top;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, output_crb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_cri,
BETA, kern2d, kernel_cri);
int cnt = 0;
for (int j = 0; j < kernel_cri; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
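/*
 * Input gradient of the dilated 2D convolution: one GEMM produces the
 * lowered gradient in inpt2d, which is then scatter-added back into
 * input_ptr through the same dilated sampling pattern.
 */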
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
int p_top = pad_rows / 2;
int p_left = pad_cols / 2;
if (p_top < 0) p_top = 0;
if (p_left < 0) p_left = 0;
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_crb, kernel_cri, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_cri);
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - p_left;
const int rstart = r * row_stride - p_top;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
}
++cnt;
}
}
}
}
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
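/*
 * 3D (cuboid) dilated convolution forward pass: im2col lowering with
 * dilation in all three spatial dimensions followed by a single GEMM.
 */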
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int dpt_in_stride = Long_val(vDpt_in_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
int padding = Long_val(vPadding);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
INIT;
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int dend = dstart + kernel_dpts_up;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int c = dstart; c < dend; c += dpt_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
/* output (output_drcb x out_channel) =
   inpt2d (output_drcb x kernel_idrc) * kernel (kernel_idrc x out_channel) */
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_drcb, out_channel, kernel_idrc, ALPHA,
inpt2d, kernel_idrc, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_cuboid_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21]
);
}
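/* Worked example (illustrative only) of the padding arithmetic used by the
   dilated im2col stubs above. The effective ("up-sampled") kernel extent is
     k_up = k + (k - 1) * (dilation - 1)
   and the centered padding per side is
     p = (stride * (out - 1) + k_up - in) / 2, clamped at 0.
   For instance, with k = 3, dilation = 2, stride = 1 and in = out = 7:
     k_up = 3 + 2 * 1 = 5 and p = (6 + 5 - 7) / 2 = 2,
   i.e. a 7-wide input padded by 2 on both sides (11 samples) yields
   11 - 5 + 1 = 7 output positions, matching the requested output width. */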
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int dpt_in_stride = Long_val(vDpt_in_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int dend = dstart + kernel_dpts_up;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int c = dstart; c < dend; c += dpt_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_idrc, output_drcb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_idrc,
BETA, kern2d, kernel_idrc);
int cnt = 0;
for (int j = 0; j < kernel_idrc; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
);
}
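/* Note on the layout juggling above (a reading of the existing code, not a
   behavioural change): the GEMM with CblasTrans computes
     kern2d = output^T * inpt2d
   as an (out_channel x kernel_idrc) row-major matrix, whereas the forward
   pass consumes the kernel as a (kernel_idrc x out_channel) row-major
   matrix. The final double loop therefore writes the transpose of kern2d
   into kernel_ptr, so the kernel gradient matches the forward layout. */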
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int dpt_in_stride = Long_val(vDpt_in_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
INIT;
int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
/* inpt2d (output_drcb x kernel_idrc) =
   output (output_drcb x out_channel) * kernel^T (out_channel x kernel_idrc) */
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_drcb, kernel_idrc, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_idrc);
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols_up;
const int rend = rstart + kernel_rows_up;
const int dend = dstart + kernel_dpts_up;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; a += col_in_stride) {
for (int b = rstart; b < rend; b += row_in_stride) {
for (int c = dstart; c < dend; c += dpt_in_stride) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
}
++cnt;
}
}
}
}
}
free(inpt2d);
return Val_unit;
}
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
);
}
#endif /* OWL_ENABLE_TEMPLATE */
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/color-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/resample.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather than direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if FILTER_LUT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
struct _ResampleFilter
{
CacheView
*view;
Image
*image;
ExceptionInfo
*exception;
MagickBooleanType
debug;
/* Information about image being resampled */
ssize_t
image_area;
PixelInterpolateMethod
interpolate;
VirtualPixelMethod
virtual_pixel;
FilterType
filter;
/* processing settings needed */
MagickBooleanType
limit_reached,
do_interpolate,
average_defined;
PixelInfo
average_pixel;
/* current elliptical area being resampled around center point */
double
A, B, C,
Vlimit, Ulimit, Uwidth, slope;
#if FILTER_LUT
/* LUT of weights for filtered average in elliptical area */
double
filter_lut[WLUT_WIDTH];
#else
/* Use a Direct call to the filter functions */
ResizeFilter
*filter_def;
double
F;
#endif
/* the practical working support of the filter */
double
support;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resampling needs to do
% a scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on an Elliptical Weighted Average, where the pixels
% found in a large elliptical area are averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping", a master's thesis by Paul S. Heckbert, June 17,
% 1989, available for free from http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual resampling operations.
%
% This function will make the appropriate AcquireCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
ExceptionInfo *exception)
{
ResampleFilter
*resample_filter;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
*resample_filter));
(void) memset(resample_filter,0,sizeof(*resample_filter));
resample_filter->exception=exception;
resample_filter->image=ReferenceImage((Image *) image);
resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
exception);
resample_filter->debug=IsEventLogging();
resample_filter->image_area=(ssize_t) (image->columns*image->rows);
resample_filter->average_defined=MagickFalse;
resample_filter->signature=MagickCoreSignature;
SetResampleFilter(resample_filter,image->filter);
(void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
(void) SetResampleFilterVirtualPixelMethod(resample_filter,
GetImageVirtualPixelMethod(image));
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling filter
% returned by AcquireResampleFilter(), freeing any memory or other
% information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample;
% the distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
PixelInfo *pixel,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
double weight;
const Quantum *pixels;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetPixelInfo(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does the resample area miss the image proper?
If so, and that area is a simple solid color, then simply return that
color! This saves a lot of calculation when resampling outside the
bounds of the source image.
However, it probably should be expanded to the image bounds plus the
filter's scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color;
* just return a single lookup color.
*
* Should this return the user's requested interpolated color?
*/
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
return(status);
}
/*
When scaling limits are reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod,
case WhiteVirtualPixelMethod
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood works well in general,
but falls down for TileEdge methods.
This needs to be done properly!!!!!!
*/
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - is there a more direct way? */
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate an average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetPixelInfo(resample_filter->image,(PixelInfo *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,exception);
pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const Quantum *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
GetPixelInfoPixel(resample_filter->image,pixels,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is an alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((double)
resample_filter->average_pixel.alpha);
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((double)
resample_filter->image->background_color.alpha);
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.alpha +=
resample_filter->image->background_color.alpha;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.alpha /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->colorspace == CMYKColorspace)
pixel->black = 0.0;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = 0.0;
/*
Determine the parallelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounded by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width across the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels within the scaled ellipse,
bounded by a parallelogram fitted to the ellipse.
*/
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const Quantum *) NULL)
return(MagickFalse);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
divisor_m += weight;
if (pixel->alpha_trait != UndefinedPixelTrait)
weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels));
pixel->red += weight*GetPixelRed(resample_filter->image,pixels);
pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
pixel->blue += weight*GetPixelBlue(resample_filter->image,pixels);
if (pixel->colorspace == CMYKColorspace)
pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels+=GetPixelChannels(resample_filter->image);
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling;
resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return(status);
}
/*
Finalize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
divisor_c = 1.0/divisor_c;
pixel->red = (double) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (double) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
return(MagickTrue);
}
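/*
  Derivation of the incremental ellipse evaluation used above (a note on the
  existing code, not a change to it): with

      Q(U,V) = A*U^2 + B*U*V + C*V^2

  stepping one pixel along a scan line (U -> U+1, V fixed) gives

      Q(U+1,V) - Q(U,V) = A*(2*U+1) + B*V = DQ

  and this first difference itself grows by a constant second difference

      DQ(U+1) - DQ(U) = 2*A = DDQ.

  This is why the inner loop updates "Q += DQ; DQ += DDQ;" rather than
  re-evaluating the quadratic for every pixel.
*/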
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The ClampUpAxes() function converts the input vectors into major and
% minor axis unit vectors, and their magnitudes. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clamping up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustafsson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 is the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
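#if 0
/* Reference-only sketch (an illustrative assumption, not part of the
   MagickCore API): invert a forward Jacobian J = [ A, B ; C, D ] into the
   Jinv quoted in the ClampUpAxes() discussion above,
     Jinv = 1/(A*D-B*C) [ D, -B ; -C, A ].
   A zero determinant signals a degenerate (non-invertible) J. */
static inline int InvertJacobian(const double A,const double B,
  const double C,const double D,
  double *a,double *b,double *c,double *d)
{
  const double det = A*D-B*C;
  if (fabs(det) < MagickEpsilon)
    return(0);                   /* singular: no inverse exists */
  *a=D/det;    *b=(-B)/det;      /* dx/dX, dx/dY */
  *c=(-C)/det; *d=A/det;         /* dy/dX, dy/dY */
  return(1);
}
#endif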
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This is not an
% orthogonal scaling, but two distorted scaling vectors, allowing the
% generation of an angled ellipse.
%
% As only two derivative scaling vectors are used, the center of the ellipse
% must be the center of the lookup. That is, any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors.
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that the scaling vectors differ from the argument order. The argument
% order is the general order in which the derivatives are extracted from the
% distortion equations, not the scaling-vector order. As such the middle two
% values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
const double dux,const double duy,const double dvx,const double dvy)
{
double A,B,C,F;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->limit_reached = MagickFalse;
/* A 'point' filter forces use of interpolation instead of area sampling */
if ( resample_filter->filter == PointFilter )
return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "# -----\n" );
(void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
dux, dvx, duy, dvy);
#endif
/* Find Ellipse Coefficients such that
A*u^2 + B*u*v + C*v^2 = F
With u,v relative to point around which we are resampling.
And the given scaling dx,dy vectors in u,v space
du/dx,dv/dx and du/dy,dv/dy
*/
#if EWA
/* Direct conversion of derivatives into elliptical coefficients.
However, when magnifying images, the scaling vectors will be small,
resulting in an ellipse that is too small to sample properly.
As such we need to clamp the major/minor axes to a minimum of 1.0
to prevent the ellipse getting too small.
*/
#if EWA_CLAMP
{ double major_mag,
minor_mag,
major_x,
major_y,
minor_x,
minor_y;
ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
&major_x, &major_y, &minor_x, &minor_y);
major_x *= major_mag; major_y *= major_mag;
minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
major_x, major_y, minor_x, minor_y);
#endif
A = major_y*major_y+minor_y*minor_y;
B = -2.0*(major_x*major_y+minor_x*minor_y);
C = major_x*major_x+minor_x*minor_x;
F = major_mag*minor_mag;
F *= F; /* square it */
}
#else /* raw unclamped EWA */
A = dvx*dvx+dvy*dvy;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy;
F = dux*dvy-duy*dvx;
F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
/*
This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 of his
thesis, which adds a unit circle to the elliptical area so as to do both
Reconstruction and Prefiltering of the pixels in the resampling. It also
means it is always likely to have at least 4 pixels within the area of the
ellipse, for weighted averaging. No scaling will result with F == 4.0 and
a circle of radius 2.0, and F smaller than this means magnification is
being used.
NOTE: This method produces a very blurry result at near unity scale while
producing perfect results for strong minification and magnification.
However, filter support is fixed at 2.0 (no good for Windowed Sinc filters).
*/
A = dvx*dvx+dvy*dvy+1;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy+1;
F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
/* Figure out the various information directly about the ellipse.
This information currently not needed at this time, but may be
needed later for better limit determination.
It is also good to have as a record for future debugging
*/
{ double alpha, beta, gamma, Major, Minor;
double Eccentricity, Ellipse_Area, Ellipse_Angle;
alpha = A+C;
beta = A-C;
gamma = sqrt(beta*beta + B*B );
if ( alpha - gamma <= MagickEpsilon )
Major=MagickMaximumValue;
else
Major=sqrt(2*F/(alpha - gamma));
Minor = sqrt(2*F/(alpha + gamma));
(void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
/* other information about ellipse include... */
Eccentricity = Major/Minor;
Ellipse_Area = MagickPI*Major*Minor;
Ellipse_Angle = atan2(B, A-C);
(void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
(double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
}
#endif
/* If one or both of the scaling vectors is impossibly large
(producing a very large raw F value), we may as well not bother
doing any form of resampling since the resampled area is very large.
In this case some alternative means of pixel sampling, such as
the average of the whole image is needed to get a reasonable
result. Calculate only as needed.
*/
if ( (4*A*C - B*B) > MagickMaximumValue ) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale ellipse to match the filter's support
(that is, multiply F by the square of the support).
Simpler to just multiply it by the support twice!
*/
F *= resample_filter->support;
F *= resample_filter->support;
/* Orthogonal bounds of the ellipse */
resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
/* Horizontally aligned parallelogram fitted to Ellipse */
resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
resample_filter->Ulimit, resample_filter->Vlimit,
resample_filter->Uwidth, resample_filter->slope );
#endif
/* Check the absolute area of the parallelogram involved.
* This limit needs more work, as it is too slow for larger images
* with tiled views of the horizon.
*/
if ( (resample_filter->Uwidth * resample_filter->Vlimit)
> (4.0*resample_filter->image_area)) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale ellipse formula to directly index the Filter Lookup Table */
{ double scale;
#if FILTER_LUT
/* scale so that F = WLUT_WIDTH; -- hardcoded */
scale = (double)WLUT_WIDTH/F;
#else
/* scale so that F = resample_filter->F (support^2) */
scale = resample_filter->F/F;
#endif
resample_filter->A = A*scale;
resample_filter->B = B*scale;
resample_filter->C = C*scale;
}
}
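/*
  Quick sanity example (illustrative, derived from the formulas above): for
  the identity distortion dux=1, dvx=0, duy=0, dvy=1 the clamped EWA path
  yields A=1, B=0, C=1, F=1, i.e. a unit circle. After multiplying F by the
  filter support twice, F = support^2, so

      Ulimit = Vlimit = Uwidth = support   and   slope = 0,

  which is exactly the axis-aligned square of pixels a conventional
  orthogonal filter of the same support would sample.
*/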
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterType filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
const FilterType filter)
{
ResizeFilter
*resize_filter;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->do_interpolate = MagickFalse;
resample_filter->filter = filter;
/* Default cylindrical filter is a Cubic Keys filter */
if ( filter == UndefinedFilter )
resample_filter->filter = RobidouxFilter;
if ( resample_filter->filter == PointFilter ) {
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
resize_filter = AcquireResizeFilter(resample_filter->image,
resample_filter->filter,MagickTrue,resample_filter->exception);
if (resize_filter == (ResizeFilter *) NULL) {
(void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
ModuleError, "UnableToSetFilteringValue",
"Fall back to Interpolated 'Point' filter");
resample_filter->filter = PointFilter;
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
/* Get the practical working support for the filter,
* after any API call blur factors have been accounted for.
*/
#if EWA
resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
/* Fill the LUT with the weights from the selected filter function */
{ int
Q;
double
r_scale;
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = (double)
GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
/* finished with the resize filter */
resize_filter = DestroyResizeFilter(resize_filter);
}
#else
/* save the filter and the scaled ellipse bounds needed for filter */
resample_filter->filter_def = resize_filter;
resample_filter->F = resample_filter->support*resample_filter->support;
#endif
/*
Adjust the scaling of the default unit circle
This assumes that any real scaling changes will always
take place AFTER the filter method has been initialized.
*/
ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
/*
This is old code kept as a reference only. Basically it generates
a Gaussian bell curve, with sigma = 0.5 if the support is 2.0.
Create Normal Gaussian 2D Filter Weighted Lookup Table.
A normal EWA Gaussian lookup would use exp(Q*ALPHA)
where Q = distance squared from 0.0 (center) to 1.0 (edge)
and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
The table is of length 1024, and equates to a support radius of 2.0,
thus needs to be scaled by ALPHA*4/1024 and any blur factor squared.
It comes from reference code provided by Fred Weinhaus.
*/
r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
{
if (IsStringTrue(GetImageArtifact(resample_filter->image,
"resample:verbose")) != MagickFalse)
{
int
Q;
double
r_scale;
/* Debug output of the filter weighting LUT
Gnuplot the LUT data, the x scale index has been adjusted
plot [0:2][-.2:1] "lut.dat" with lines
The filter values should be normalized for comparison
*/
printf("#\n");
printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
resample_filter->filter) );
printf("#\n");
printf("# Note: values in table are using a squared radius lookup.\n");
printf("# As such its distribution is not uniform.\n");
printf("#\n");
printf("# The X value is the support distance for the Y weight\n");
printf("# so you can use gnuplot to plot this cylindrical filter\n");
printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
printf("#\n");
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
printf("%8.*g %.*g\n",
GetMagickPrecision(),sqrt((double)Q)*r_scale,
GetMagickPrecision(),resample_filter->filter_lut[Q] );
printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
}
/* Output the above once only for each image, and each setting
(void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
*/
}
#endif /* FILTER_LUT */
return;
}
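/*
  Note on the LUT indexing set up above (a reading of the existing code, not
  a behavioural change): filter_lut[Q] stores the filter weight at radius

      r = support * sqrt(Q / WLUT_WIDTH),

  so the table is indexed by a scaled *squared* radius, and r == support
  falls exactly at the end of the table. Inverting, a radius r maps to index

      Q = WLUT_WIDTH * (r / support)^2,

  which is why ScaleResampleFilter() rescales the ellipse so that F equals
  WLUT_WIDTH, letting ResamplePixelColor() use Q directly as the LUT index.
*/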
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolateMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
ResampleFilter *resample_filter,const PixelInterpolateMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->interpolate=method;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->virtual_pixel=method;
if (method != UndefinedVirtualPixelMethod)
(void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
return(MagickTrue);
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#endif
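// On non-CUDA builds the *_cuda macros above expand to nothing, so op
// classes can list them unconditionally alongside their host counterparts.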
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#endif
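// Usage sketch for the reductions declared above (illustrative only;
// 'buffer' and 'n' are assumed to be in scope): each thread's private
// copy is seeded from the initializer (-MAX_FLOAT for maxT) and the
// partials are merged with nd4j_max.
//
//   float mx = -MAX_FLOAT;
//   #pragma omp parallel for reduction(maxT : mx)
//   for (Nd4jLong i = 0; i < n; i++)
//       mx = nd4j::math::nd4j_max(mx, buffer[i]);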
namespace functions {
namespace indexreduce {
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
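// IndexValue carries an element together with its position so that
// index-reduce ops (argmax-style reductions) can propagate both while
// partial results are merged; e.g. IndexValue<float>(3.5f, 42) keeps
// the value 3.5f paired with its source index 42.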
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
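// These op classes are stateless functor templates: the loop kernels
// invoke the static op() overloads directly, so for example
// simdOps::Add<float, float, float>::op(1.0f, 2.0f) yields 3.0f, and
// startingValue() supplies the identity element (0 for Add, 1 for
// Multiply) when an op seeds a reduction.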
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - params[0]), 2.f);
}
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
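// The "full" form above augments the basic Poisson terms exp(c) - z*c
// with the Stirling approximation of log(z!), i.e.
// z*log(z) - z + 0.5*log(2*pi*z); LogPoissonLoss below keeps only the
// basic terms.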
template <typename X, typename Y, typename Z>
class LogPoissonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
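// Floored modulo keeps the sign of the divisor, unlike fmod, which keeps
// the sign of the dividend. Worked example: for d1 = -3, d2 = 5, fmod
// yields -3; the operands' signs differ, so the op returns
// fmod(-3 + 5, 5) = 2.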
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return !((int) d1 && (int) d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just an optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether two elements in an array
* are epsilon-equal
*/
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
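// With MIN defined as 1e-12 above, this is a fixed absolute tolerance,
// not a relative one: for double inputs, 2.0 and 2.0 + 1e-13 compare
// equal (|diff| <= 1e-12) while 2.0 and 2.0 + 1e-9 do not.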
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cos<X,X>(d1);
}
};
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 < static_cast<X>(-1))
return static_cast<X>(-1);
else if (d1 > static_cast<X>(1))
return static_cast<X>(1);
else
return d1;
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(d1);
}
};
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(1 + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2);
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<X>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erf<X,X>(d1);
}
};
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erfc<X,X>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return d1;
}
};
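// PowDerivative above implements the power rule d/dx x^n = n * x^(n-1),
// with the exponent n supplied either as d2 or as params[0].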
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
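// clamps d1 into the symmetric range [-d2, d2]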
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as a runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
case 2: // less_than
return d1 < compare ? true : false;
case 3: // greater_than
return d1 > compare ? true : false;
case 4: // less_or_equals_than
return d1 <= compare ? true : false;
case 5: // greater_or_equals_than
return d1 >= compare ? true : false;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? true : false;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? true : false;
case 10:
return (d1 == compare) ? true : false;
case 11:
return (d1 != compare) ? true : false;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
};
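// Callers pack extraParams as {compare, eps, mode}; e.g. mode 2 with
// compare = 0 turns this into an elementwise d1 < 0 test. MatchCondition
// below applies the same mode table as a counting reduction.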
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_elu<X,X>(d1);
}
};
template <typename X>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_eluderivative<X,X>(d1);
}
};
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
}
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_leakyrelu<X,Z>(d1, d2);
}
};
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
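// This is selu(x) = lambda * x for x > 0, and lambda * alpha * (e^x - 1)
// otherwise, with the product written out; SELU_LAMBDA and SELU_ALPHA
// are the self-normalizing constants from Klambauer et al. (2017).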
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asin<X,X>(d1);
}
};
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sinh<X,X>(d1);
}
};
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X,X>(d1);
}
};
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tan<X,X>(d1);
}
};
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atan<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTOFF))
return static_cast<X>(- MIN_CUTOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTOFF))
return static_cast<X>(MIN_CUTOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)) * nd4j::math::nd4j_log<X, Z>(nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2.0f)));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<X, Z>(-reduction);
}
};
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return (Z) reduction / (Z) n;
}
};
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction) / static_cast<X>(n);
}
};
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = nd4j::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
};
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old),
nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(reduction)); // max of a value with itself simplified to a single abs
}
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
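// sample variance: op() accumulates squared deviations from the precomputed mean
// (passed in extraParams[0]); dividing by (n - 1) applies Bessel's correction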
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
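* (assumes extraParams[0] holds the precomputed mean; the reduction sums squared
* deviations and postProcess reuses Variance's (n - 1) normalization before sqrt)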
*/
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
X mean = extraParams[0];
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
return sqrtRet;
}
};
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static Y num(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static Y denom(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
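* (a reduce3-style op as written here: op() forms the pairwise products,
* update()/merge() sum them, and postProcess returns the sum unchanged)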
*/
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
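* (op() contributes 1 for each element pair differing by more than eps, which is
* read from extraParamsRef[2]; a reduction of 0 therefore means "equal within eps")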
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]);
return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps));
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(Z old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return nd4j::math::nd4j_abs<X>(d1 - d2);
}
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template <typename X>
class IndexAbsoluteMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
val.value = nd4j::math::nd4j_abs<X>(val.value); // take the absolute value of the payload; returning abs of the IndexValue itself would not type-check
return val;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
Z ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return static_cast<Z>(val.variance());
return ret;
}
return static_cast<Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
auto ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
else
return nd4j::math::nd4j_sqrt<double, Z>(ret);
}
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X>
class DropOut {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
inline _CUDA_D static X op(X d1, X *params) {
X prob = params[0];
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x; // per-thread id; gridDim.x would give every block the same value
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // avoid integer division, which would almost always yield 0
#endif
return rnd >= prob ? static_cast<X>(0.0f) : d1;
}
};
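// Inverted dropout: d2 is the keep probability; values that survive are scaled
// by 1/d2 so no rescaling is needed at inference time.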
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static Z op(X d1, Y d2, Z *params) {
Y prob = d2;
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x; // per-thread id; gridDim.x would give every block the same value
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // avoid integer division, which would almost always yield 0
#endif
return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob)); // static_cast: reinterpret_cast between arithmetic types is ill-formed
}
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ;
}
};
// this op is used for conditional pairwise transforms only
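// Both CompareAndReplace and CompareAndSet read their configuration from params:
// params[0] = compare value, params[2] = eps, params[3] = mode, with the modes
// 0 eq(eps), 1 neq(eps), 2 lt, 3 gt, 4 lte, 5 gte, 6 abs_lt, 7 abs_gt,
// 8 is_inf, 9 is_nan, 10 eq, 11 neq, 12 abs_gte, 13 abs_lte.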
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
// op definition for PairWise Transform
op_def static Z op(X d1, Y d2, Z *params) {
auto zd1 = static_cast<Z>(d1);
auto zd2 = static_cast<Z>(d2);
auto compare = params[0];
auto eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps)
return zd2;
else
return zd1;
else if (mode == 1) // not equals eps
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps)
return zd2;
else
return zd1;
else if (mode == 2) // less_than eps
if (zd1 < compare)
return zd2;
else
return zd1;
else if (mode ==3) // greater_than
if (zd1 > compare)
return zd2;
else
return zd1;
else if (mode == 4) // less_or_equals_than
if (zd1 <= compare)
return zd2;
else
return zd1;
else if (mode == 5) // greater_or_equals_than
if (zd1 >= compare)
return zd2;
else
return zd1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(zd1) < compare)
return zd2;
else
return zd1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(zd1) > compare)
return zd2;
else
return zd1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(zd1))
return zd2;
else
return zd1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(zd1))
return zd2;
else
return zd1;
else if (mode == 10)
if (zd1 == compare)
return zd2;
else
return zd1;
else if (mode == 11)
if (zd1 != compare)
return zd2;
else
return zd1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) >= compare)
return zd2;
else
return zd1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) <= compare)
return zd2;
else
return zd1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return zd1;
}
};
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
// op definition for PairWise Transform
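// Same params layout and mode codes as CompareAndReplace above; note that the
// comparisons here test d2, except modes 12/13 which test d1 as written.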
op_def static Z op(X dX, Y dY, Z *params) {
auto d1 = static_cast<Z>(dX);
auto d2 = static_cast<Z>(dY);
auto compare = params[0];
auto eps = params[2];
auto mode = static_cast<int>(params[3]);
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template <typename X>
class CompareAndSetTransform {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op definition for Transform
op_def static X op(X d1, X *params) {
auto compare = params[0];
auto set = params[1];
auto eps = params[2];
// with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode ==3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<X>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<X>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
} else {
// grid dimensions are required; otherwise Nx/Ny/Nz would be read uninitialized below
fprintf(stderr, "Usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
if (argc > 4)
Nt = atoi(argv[4]);
else
Nt = 100; // arbitrary fallback so Nt is never used uninitialized
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 512;
tile_size[4] = -1;
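// Note: the -1 sentinel terminates the list; the four extents above appear to
// match the time/space blocking factors (t2/t3/t4 tiling) used by the generated loop nest below.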
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(32*t3+Nx+19,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),128*t4+126);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
v3-conduct.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <mpi.h>
/**
* 2)
* Parallelization via MPI should be favoured when huge data sets
* are processes on big cluster with different nodes. Additionally
* OpenMP can be used to further parallelize the work on each node.
* This way it's possible to divide the workload over more cores
* than those that would be available on a single node.
*
* 3) a)
* Partitioning the grid into rows is the easiest to implement and
* very likely also the fastest version, due to minimal amount of
* communications (compared to partitioning into squares) and efficient
* cache usage. Partittioning into rows however, results in more data
* being sent compared to squares which could result in a lower perfomance
* with big grid sizes.
**/
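/**
* Added note (rough arithmetic): on an N x N grid with P ranks, row
* partitioning exchanges two halo rows of N doubles per rank per step
* (2N values), while a sqrt(P) x sqrt(P) square decomposition exchanges
* four edges of N/sqrt(P) values each (4N/sqrt(P)) -- fewer values once
* P > 4, but in twice as many messages, so latency dominates for small grids.
**/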
#define DEFAULT_GRIDSIZE 128
// Save/ Print only every nth step:
#define PRINTSTEP 1
// Run for that many time steps
#define DEFAULT_TIMESTEPS 1000
void init_cells(double* grid, int gridsize);
void print(double* grid, int padded_grid_size, int time);
void save(FILE *f, double* grid, int padded_grid_size, int time);
int find_option( int argc, char **argv, const char *option );
int read_int( int argc, char **argv, const char *option, int default_value );
char *read_string( int argc, char **argv, const char *option, char *default_value );
int main(int argc, char** argv) {
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Status status;
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the grid size\n" );
printf( "-t <int> to set the number of time steps\n" );
printf( "-o <filename> to specify the output file name\n" );
return 0;
}
int GRIDSIZE = read_int( argc, argv, "-n", DEFAULT_GRIDSIZE );
// Check gridsize for some basic assumptions
if(GRIDSIZE%2) {
printf("Only even Gridsize allowed!\n");
return 1;
}
if(GRIDSIZE%numtasks) {
printf("Gridsize must be divisible by the number of MPI tasks!\n");
return 1;
}
int TIMESTEPS = read_int( argc, argv, "-t", DEFAULT_TIMESTEPS );
char *savename = read_string( argc, argv, "-o", "sample_conduct.txt" );
FILE *f = savename ? fopen( savename, "w" ) : NULL;
if( f == NULL )
{
printf( "failed to open %s\n", savename );
return 1;
}
double *T = NULL, *T_block, *Tn_block; // T is only allocated and used on rank 0
// Allocate Grid with a padding on every side for convenience
int padded_grid_size = GRIDSIZE+2;
int blocksize = GRIDSIZE / numtasks;
if(rank == 0) {
T=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
// temp grid to hold the calculated data for the next time step
init_cells(T,padded_grid_size);
}
T_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
Tn_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
MPI_Scatter(rank == 0 ? &T[padded_grid_size] : NULL, padded_grid_size * blocksize, MPI_DOUBLE, &T_block[padded_grid_size],
padded_grid_size * blocksize, MPI_DOUBLE, 0, MPI_COMM_WORLD); // sendbuf is only read on the root, where T is allocated
// remember -- our grid has a border around it!
int last_row = padded_grid_size * (blocksize + 1);
int next_to_last_row = last_row - padded_grid_size;
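// Halo exchange: interior ranks swap one ghost row with each neighbour;
// rank 0 and rank numtasks-1 each have only one neighbour. All messages use tag 42.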
for(int t=0;t<TIMESTEPS;t++) { // Loop for the time steps
if(rank > 0 && rank < numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42,
T_block, padded_grid_size, MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42,
&T_block[last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // MPI_STATUS_IGNORE, not NULL, is the portable way to drop the status
} else if(rank == 0) {
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, 1, 42, &T_block[last_row], padded_grid_size,
MPI_DOUBLE, 1, 42, MPI_COMM_WORLD, &status);
} else if(rank == numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42, T_block, padded_grid_size,
MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, &status);
}
// Calculate grid cells for next timestep
#pragma omp parallel for default(none) shared(blocksize, padded_grid_size, Tn_block, T_block)
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
Tn_block[i*padded_grid_size + j] = (T_block[(i-1)*padded_grid_size+j] + T_block[i*padded_grid_size + (j-1)] \
+ T_block[i*padded_grid_size+(j+1)] + T_block[(i+1)*padded_grid_size+j]) / 4.0;
}
}
// copy new grid into old one
#pragma omp parallel for default(none) shared(blocksize, padded_grid_size, Tn_block, T_block)
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
T_block[i*padded_grid_size+j] = Tn_block[i*padded_grid_size+j];
}
}
MPI_Barrier(MPI_COMM_WORLD);
/**
* This is a quick and dirty fix to implement the save functionality. A better way would be to have each rank
* save its part of the grid every PRINTSTEP timesteps into its own file and then merge those individual files,
* each representing a part of the grid over time, into one output file at the end of the simulation. This way
* we can get rid of the Gather and the non-rank-0 processes are not idle during the save process.
**/
if(!(t % PRINTSTEP)) {
MPI_Gather(&T_block[padded_grid_size], blocksize*padded_grid_size, MPI_DOUBLE, rank == 0 ? &T[padded_grid_size] : NULL,
blocksize*padded_grid_size, MPI_DOUBLE, 0, MPI_COMM_WORLD); // recvbuf is only meaningful on the root
if(rank == 0)
save(f,T,padded_grid_size,TIMESTEPS);
}
}
fclose(f);
MPI_Finalize();
return 0;
}
void init_cells(double* grid, int gridsize) {
// set everything to zero, even the border
#pragma omp parallel for default(none) shared(grid, gridsize)
for(int i=0;i<gridsize;i++) {
for(int j=0;j<gridsize;j++) {
grid[i*gridsize + j]=0;
}
}
// but the most inner 4 cells
for(int i=gridsize/2-1;i<=gridsize/2;i++) {
for(int j=gridsize/2-1;j<=gridsize/2;j++) {
grid[i*gridsize + j]=1;
}
}
}
void print(double* grid, int padded_grid_size, int time) {
printf("\n\n\n");
int i,j;
// we don't want to print the border!
for(i=1;i<padded_grid_size-1;i++) {
for(j=1;j<padded_grid_size-1;j++) {
printf("%.2f ",grid[i*padded_grid_size + j]);
}
printf("\n");
}
}
void save( FILE *f, double* grid, int padded_grid_size,int TIMESTEPS)
{
int i,j;
static int first = 1;
if( first )
{
fprintf( f, "# %d %d\n", TIMESTEPS, padded_grid_size-2 );
first = 0;
}
for(i = 1; i < padded_grid_size-1; i++ ) {
for(j=1; j < padded_grid_size-1; j++) {
fprintf( f, "%.g ", grid[i* padded_grid_size + j] );
}
fprintf(f,"\n");
}
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
int i;
for( i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
char *read_string( int argc, char **argv, const char *option, char *default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return argv[iplace+1];
return default_value;
}
|
trmm_x_sky_n_lo_row_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
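/*
* Reading of this kernel (a descriptive sketch, not from upstream docs):
* first y := beta * y; then, for each skyline segment ac, the stored entries
* mat->values[pointers[ac] .. pointers[ac+1]) occupy positions (cr, ac) with
* cr = ac - eles_num + idx, ending on the diagonal, and each contributes
* y[cr] += alpha * conj(A[cr][ac]) * x[ac] (the conjugation presumably comes
* from alpha_mul_3c, matching the _conj suffix). Since idx >= 1 implies
* cr <= ac, the "ac >= cr" guard is always true and only documents the
* lower-triangular intent.
*/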
#ifdef COMPLEX
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < mat->rows; i++)
for(ALPHA_INT j = 0; j < columns; j++)
alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT cc = 0; cc < columns; ++cc)
{
for (ALPHA_INT ac = 0; ac < mat->cols; ++ac)
{
ALPHA_INT start = mat->pointers[ac];
ALPHA_INT end = mat->pointers[ac + 1];
ALPHA_INT idx = 1;
ALPHA_INT eles_num = end - start;
for (ALPHA_INT ai = start; ai < end; ++ai)
{
ALPHA_INT cr = ac - eles_num + idx;
if (ac >= cr)
{
ALPHA_Number t;
alpha_mul_3c(t, alpha, mat->values[ai]);
alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]);
}
idx++;
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
#else
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
|
ext_core.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "ext_core.h"
#include "ext_sweep.h"
#include "ext_macros.h"
#include "ext_kernels.h"
#include "ext_problem.h"
#include "ext_profiler.h"
void ext_solve_(
double *mu_in,
double *eta_in,
double *xi_in,
double *scat_coeff_in,
double *weights_in,
double *velocity_in,
double *xs_in,
int *mat_in,
double *fixed_source_in,
double *gg_cs_in,
int *lma_in)
{
initialise_host_memory(mu_in, eta_in, xi_in, scat_coeff_in, weights_in, velocity_in,
xs_in, mat_in, fixed_source_in, gg_cs_in, lma_in);
double mem_capacity =
mu_len + eta_len + xi_len + scat_coeff_len + weights_len + velocity_len +
xs_len + mat_len + fixed_source_len + gg_cs_len + lma_len + flux_i_len +
flux_j_len + flux_k_len + dd_j_len + dd_k_len + total_cross_section_len +
scat_cs_len + denom_len + source_len + time_delta_len + groups_todo_len +
g2g_source_len + scalar_flux_len*4 + scalar_mom_len + flux_in_len +
flux_out_len;
printf("This problem requires more than %.3fGB of memory capacity.\n",
(mem_capacity * sizeof(double)) / (1024*1024*1024));
#pragma omp target update if(OFFLOAD) device(MIC_DEVICE) \
to(nx, ny, nz, ng, nang, noct, cmom, nmom, \
nmat, ichunk, timesteps, dt, dx, dy, dz, outers, inners, \
epsi, tolr, dd_i, global_timestep)
#pragma omp target data if(OFFLOAD) device(MIC_DEVICE) \
map(to: mu[:mu_len], eta[:eta_len], xi[:xi_len], scat_coeff[:scat_coeff_len], \
weights[:weights_len], velocity[:velocity_len], xs[:xs_len], mat[:mat_len], \
fixed_source[:fixed_source_len], gg_cs[:gg_cs_len], lma[:lma_len]) \
map(alloc: flux_i[:flux_i_len], flux_j[:flux_j_len], flux_k[:flux_k_len], \
dd_j[:dd_j_len], dd_k[:dd_k_len], total_cross_section[:total_cross_section_len], \
scat_cs[:scat_cs_len], denom[:denom_len], source[:source_len], \
time_delta[:time_delta_len], groups_todo[:groups_todo_len], g2g_source[:g2g_source_len], \
old_outer_scalar[:scalar_flux_len], new_scalar[:scalar_flux_len], \
old_inner_scalar[:scalar_flux_len])\
map(from: scalar_flux[:scalar_flux_len], flux_in[:flux_in_len],\
flux_out[:flux_out_len], scalar_mom[:scalar_mom_len])
{
initialise_device_memory();
iterate();
}
_mm_free(old_outer_scalar);
_mm_free(new_scalar);
_mm_free(old_inner_scalar);
_mm_free(groups_todo);
}
// Argument list:
// nx, ny, nz are the (local to MPI task) dimensions of the grid
// ng is the number of energy groups
// cmom is the "computational number of moments"
// ichunk is the number of yz planes in the KBA decomposition
// dd_i, dd_j(nang), dd_k(nang) are the x, y, z (resp.) diamond difference coefficients
// mu(nang) holds the x-direction cosines
// scat_coef [ec](nang,cmom,noct) - Scattering expansion coefficients
// time_delta [vdelt](ng) - time-absorption coefficient
// denom(nang,nx,ny,nz,ng) - Sweep denominator, pre-computed/inverted
// weights(nang) - angle weights for scalar reduction
void initialise_host_memory(
double *mu_in,
double *eta_in,
double *xi_in,
double *scat_coeff_in,
double *weights_in,
double *velocity_in,
double *xs_in,
int *mat_in,
double *fixed_source_in,
double *gg_cs_in,
int *lma_in)
{
START_PROFILING;
flux_i = (double*)_mm_malloc(sizeof(double)*nang*ny*nz*ng, VEC_ALIGN);
flux_j = (double*)_mm_malloc(sizeof(double)*nang*nx*nz*ng, VEC_ALIGN);
flux_k = (double*)_mm_malloc(sizeof(double)*nang*nx*ny*ng, VEC_ALIGN);
dd_j = (double*)_mm_malloc(sizeof(double)*nang, VEC_ALIGN);
dd_k = (double*)_mm_malloc(sizeof(double)*nang, VEC_ALIGN);
total_cross_section = (double*)_mm_malloc(sizeof(double)*nx*ny*nz*ng, VEC_ALIGN);
scat_cs = (double*)_mm_malloc(sizeof(double)*nmom*nx*ny*nz*ng, VEC_ALIGN);
denom = (double*)_mm_malloc(sizeof(double)*nang*nx*ny*nz*ng, VEC_ALIGN);
source = (double*)_mm_malloc(sizeof(double)*cmom*nx*ny*nz*ng, VEC_ALIGN);
time_delta = (double*)_mm_malloc(sizeof(double)*ng, VEC_ALIGN);
groups_todo = (unsigned int*)_mm_malloc(sizeof(unsigned int)*ng, VEC_ALIGN);
g2g_source = (double*)_mm_malloc(sizeof(double)*cmom*nx*ny*nz*ng, VEC_ALIGN);
scalar_flux = (double*)_mm_malloc(sizeof(double)*nx*ny*nz*ng, VEC_ALIGN);
flux_in = (double*)_mm_malloc(sizeof(double)*nang*nx*ny*nz*ng*noct, VEC_ALIGN);
flux_out = (double*)_mm_malloc(sizeof(double)*nang*nx*ny*nz*ng*noct, VEC_ALIGN);
scalar_mom = (double*)_mm_malloc(sizeof(double)*(cmom-1)*nx*ny*nz*ng, VEC_ALIGN);
old_outer_scalar = (double*)_mm_malloc(sizeof(double)*nx*ny*nz*ng, VEC_ALIGN);
old_inner_scalar = (double*)_mm_malloc(sizeof(double)*nx*ny*nz*ng, VEC_ALIGN);
new_scalar = (double*)_mm_malloc(sizeof(double)*nx*ny*nz*ng, VEC_ALIGN);
// Read-only buffers initialised in Fortran code
mu = mu_in;
eta = eta_in;
xi = xi_in;
weights = weights_in;
velocity = velocity_in;
mat = mat_in;
fixed_source = fixed_source_in;
gg_cs = gg_cs_in;
lma = lma_in;
xs = xs_in;
scat_coeff = transpose_scat_coeff(scat_coeff_in);
STOP_PROFILING(__func__, false);
}
// Initialises the problem parameters
void ext_initialise_parameters_(
int *nx_, int *ny_, int *nz_,
int *ng_, int *nang_, int *noct_,
int *cmom_, int *nmom_,
int *ichunk_,
double *dx_, double *dy_, double *dz_,
double *dt_,
int *nmat_,
int *timesteps_, int *outers_, int *inners_,
double *epsi_, double *tolr_)
{
START_PROFILING;
// Save problem size information to globals
nx = *nx_;
ny = *ny_;
nz = *nz_;
ng = *ng_;
nang = *nang_;
noct = *noct_;
cmom = *cmom_;
nmom = *nmom_;
ichunk = *ichunk_;
dx = *dx_;
dy = *dy_;
dz = *dz_;
dt = *dt_;
nmat = *nmat_;
timesteps = *timesteps_;
outers = *outers_;
inners = *inners_;
epsi = *epsi_;
tolr = *tolr_;
if (nx != ichunk)
{
printf("Warning: nx and ichunk are different - expect the answers to be wrong...\n");
}
STOP_PROFILING(__func__, false);
}
// Do the timestep, outer and inner iterations
void iterate(void)
{
unsigned int num_groups_todo;
bool outer_done;
double t1 = omp_get_wtime();
// Timestep loop
for (unsigned int t = 0; t < timesteps; t++)
{
unsigned int tot_outers = 0;
unsigned int tot_inners = 0;
global_timestep = t;
// Calculate data required at the beginning of each timestep
zero_scalar_flux();
zero_flux_moments_buffer();
// Outer loop
outer_done = false;
for (unsigned int o = 0; o < outers; o++)
{
// Reset the inner convergence list
bool inner_done = false;
#pragma omp target if(OFFLOAD) device(MIC_DEVICE)
for (unsigned int g = 0; g < ng; g++)
{
groups_todo[g] = g;
}
num_groups_todo = ng;
tot_outers++;
calc_total_cross_section();
calc_scattering_cross_section();
calc_dd_coefficients();
calc_time_delta();
calc_denominator();
// Compute the outer source
calc_outer_source();
// Save flux
store_scalar_flux(old_outer_scalar);
// Inner loop
for (unsigned int i = 0; i < inners; i++)
{
tot_inners++;
// Compute the inner source
calc_inner_source();
// Save flux
store_scalar_flux(old_inner_scalar);
zero_edge_flux_buffers();
#ifdef TIMING
double t1 = omp_get_wtime();
#endif
// Sweep
perform_sweep(num_groups_todo);
#ifdef TIMING
double t2 = omp_get_wtime();
printf("sweep took: %lfs\n", t2-t1);
#endif
// Scalar flux
reduce_angular();
#ifdef TIMING
double t3 = omp_get_wtime();
printf("reductions took: %lfs\n", t3-t2);
#endif
// Check convergence
store_scalar_flux(new_scalar);
#ifdef TIMING
double t4 = omp_get_wtime();
#endif
inner_done = check_convergence(old_inner_scalar, new_scalar,
epsi, groups_todo, &num_groups_todo, true);
#ifdef TIMING
double t5 = omp_get_wtime();
printf("inner conv test took %lfs\n",t5-t4);
#endif
if (inner_done)
{
break;
}
}
// Check convergence
outer_done = check_convergence(old_outer_scalar, new_scalar,
100.0*epsi, groups_todo, &num_groups_todo, false);
if (outer_done && inner_done)
{
break;
}
}
printf("Time %d - %d outers, %d inners.\n", t, tot_outers, tot_inners);
// Exit the time loop early if outer not converged
if (!outer_done)
{
break;
}
}
double t2 = omp_get_wtime();
printf("Time to convergence: %.3lfs\n", t2-t1);
if (!outer_done)
{
printf("Warning: did not converge\n");
}
PRINT_PROFILING_RESULTS;
}
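// Illustrative only: check_convergence() (declared in ext_kernels.h, not shown
// here) also prunes the groups_todo list; this hypothetical sketch shows just
// the relative-difference test such a routine typically applies to two
// scalar-flux snapshots.
static bool flux_converged_sketch(const double *old_flux, const double *new_flux,
    unsigned int len, double eps)
{
  for (unsigned int i = 0; i < len; i++)
  {
    double diff = new_flux[i] - old_flux[i];
    if (diff < 0.0) diff = -diff;
    double mag = (new_flux[i] < 0.0) ? -new_flux[i] : new_flux[i];
    // Guard against division by (near-)zero flux values
    if (diff > eps * (mag > 1e-300 ? mag : 1e-300))
    {
      return false;
    }
  }
  return true;
}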
// Compute the scalar flux from the angular flux
void reduce_angular(void)
{
START_PROFILING;
zero_scalar_flux();
zero_flux_moments_buffer();
double* angular = (global_timestep % 2 == 0) ? flux_out : flux_in;
double* angular_prev = (global_timestep % 2 == 0) ? flux_in : flux_out;
#pragma omp target if(OFFLOAD) device(MIC_DEVICE)
// Accumulate contributions from all 8 octants (noct)
for (unsigned int o = 0; o < 8; ++o)
{
#pragma omp parallel for collapse(3)
for(int k = 0; k < nz; ++k)
{
for(int j = 0; j < ny; ++j)
{
for(int i = 0; i < nx; ++i)
{
#pragma omp simd lastprivate(i,j,k,o) aligned(weights:64)
for (unsigned int g = 0; g < ng; g++)
{
const bool tg = time_delta(g) != 0.0;
for (unsigned int a = 0; a < nang; a++)
{
const double weight = weights(a);
const double ang = angular(o,i,j,k,g,a);
const double ang_p = angular_prev(o,i,j,k,g,a);
if (tg)
{
scalar_flux(g,i,j,k) += weight * (0.5 * (ang + ang_p));
for (unsigned int l = 0; l < (cmom-1); l++)
{
scalar_mom(g,l,i,j,k) += scat_coeff(l+1,a,o)
* weight * (0.5 * (ang + ang_p));
}
}
else
{
scalar_flux(g,i,j,k) += weight * ang;
for (unsigned int l = 0; l < (cmom-1); l++)
{
scalar_mom(g,l,i,j,k) +=
scat_coeff(l+1,a,o) * weight * ang;
}
}
}
}
}
}
}
}
STOP_PROFILING(__func__, true);
}
// Copy the scalar flux value back to the host and transpose
void ext_get_transpose_scalar_flux_(double* out_scalar)
{
// Transpose the data into the original SNAP format
for (unsigned int g = 0; g < ng; g++)
{
for (unsigned int k = 0; k < nz; k++)
{
for (unsigned int j = 0; j < ny; j++)
{
for (unsigned int i = 0; i < nx; i++)
{
out_scalar[i+(nx*j)+(nx*ny*k)+(nx*ny*nz*g)]
= scalar_flux[g+(ng*i)+(ng*nx*j)+(ng*nx*ny*k)];
}
}
}
}
}
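// Illustrative only: the scalar_flux(g,i,j,k) accessor used above is defined
// in ext_macros.h (not shown) and is assumed to expand to the group-fastest
// layout visible in the transpose above. A hypothetical equivalent:
#define SCALAR_FLUX_IDX(g, i, j, k) ((g) + ng*(i) + ng*nx*(j) + ng*nx*ny*(k))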
void ext_get_transpose_scalar_moments_(double* out_scalar_moments)
{
// Transpose the data into the original SNAP format
for (unsigned int g = 0; g < ng; g++)
{
for (unsigned int l = 0; l < cmom-1; l++)
{
for (unsigned int i = 0; i < nx; i++)
{
for (unsigned int j = 0; j < ny; j++)
{
for (unsigned int k = 0; k < nz; k++)
{
out_scalar_moments[l+((cmom-1)*i)+((cmom-1)*nx*j)+((cmom-1)*nx*ny*k)+((cmom-1)*nx*ny*nz*g)]
= scalar_mom(g,l,i,j,k);
}
}
}
}
}
}
// Copy the flux_out buffer back to the host
void ext_get_transpose_output_flux_(double* output_flux)
{
double *tmp = (global_timestep % 2 == 0) ? flux_out : flux_in;
// Transpose the data into the original SNAP format
for (int a = 0; a < nang; a++)
{
for (int g = 0; g < ng; g++)
{
for (int k = 0; k < nz; k++)
{
for (int j = 0; j < ny; j++)
{
for (int i = 0; i < nx; i++)
{
for (int o = 0; o < noct; o++)
{
output_flux[a+(nang*i)+(nang*nx*j)+(nang*nx*ny*k)+(nang*nx*ny*nz*o)+(nang*nx*ny*nz*noct*g)]
= tmp[a+(nang*g)+(nang*ng*i)+(nang*ng*nx*j)+(nang*ng*nx*ny*k)+(nang*nx*ny*nz*ng*o)];
}
}
}
}
}
}
}
// Transpose the scatter coefficients matrix
double* transpose_scat_coeff(double* scat_coeff_in)
{
START_PROFILING;
double* scat_coeff = (double*)_mm_malloc(sizeof(double)*nang*cmom*noct, VEC_ALIGN);
for(unsigned int o = 0; o < noct; ++o)
{
for(unsigned int l = 0; l < cmom; ++l)
{
for(unsigned int a = 0; a < nang; ++a)
{
scat_coeff[l+a*cmom+o*(cmom*nang)] =
scat_coeff_in[a+l*nang+o*(cmom*nang)];
}
}
}
STOP_PROFILING(__func__,true);
return scat_coeff;
}
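/* Worked example of the transpose above: within each octant block o, the input
   entry at a + l*nang + o*(cmom*nang) moves to l + a*cmom + o*(cmom*nang),
   i.e. the (angle, moment) pair is swapped from angle-fastest to
   moment-fastest ordering. */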
|
par_csr_matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_ParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"
/* In addition to the publicly accessible interface in HYPRE_mv.h, the
implementation in this file uses accessor macros into the sequential matrix
structure, and so includes the .h that defines that structure. Should those
accessor functions become proper functions at some later date, this will not
be necessary. AJC 4/99 */
HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCreate
*--------------------------------------------------------------------------*/
/* If create is called and row_starts and col_starts are NOT null, then it is
assumed that they are of length 2 containing the start row of the calling
processor followed by the start row of the next processor - AHB 6/05 */
hypre_ParCSRMatrix*
hypre_ParCSRMatrixCreate( MPI_Comm comm,
HYPRE_BigInt global_num_rows,
HYPRE_BigInt global_num_cols,
HYPRE_BigInt *row_starts,
HYPRE_BigInt *col_starts,
HYPRE_Int num_cols_offd,
HYPRE_Int num_nonzeros_diag,
HYPRE_Int num_nonzeros_offd )
{
hypre_ParCSRMatrix *matrix;
HYPRE_Int num_procs, my_id;
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt first_row_index, first_col_diag;
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
if (!row_starts)
{
hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id,
&row_starts);
}
if (!col_starts)
{
if (global_num_rows == global_num_cols)
{
col_starts = row_starts;
}
else
{
hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id,
&col_starts);
}
}
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = row_starts[0];
local_num_rows = row_starts[1]-first_row_index ;
first_col_diag = col_starts[0];
local_num_cols = col_starts[1]-first_col_diag;
hypre_ParCSRMatrixComm(matrix) = comm;
hypre_ParCSRMatrixDiag(matrix) =
hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag);
hypre_ParCSRMatrixOffd(matrix) =
hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd);
hypre_ParCSRMatrixDiagT(matrix) = NULL;
hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag;
hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL;
hypre_ParCSRMatrixProcOrdering(matrix) = NULL;
hypre_ParCSRMatrixAssumedPartition(matrix) = NULL;
hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1;
/* We could make these null instead of leaving the range. If that change is
made, then when this create is called from functions like the
matrix-matrix multiply, be careful not to generate a new partition. */
hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
hypre_ParCSRMatrixColStarts(matrix) = col_starts;
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
hypre_ParCSRMatrixCommPkgT(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
if (row_starts == col_starts)
{
hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
}
hypre_ParCSRMatrixRowindices(matrix) = NULL;
hypre_ParCSRMatrixRowvalues(matrix) = NULL;
hypre_ParCSRMatrixGetrowactive(matrix) = 0;
matrix->bdiaginv = NULL;
matrix->bdiaginv_comm_pkg = NULL;
matrix->bdiag_size = -1;
#if defined(HYPRE_USING_CUDA)
hypre_ParCSRMatrixSocDiagJ(matrix) = NULL;
hypre_ParCSRMatrixSocOffdJ(matrix) = NULL;
#endif
return matrix;
}
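/* Usage sketch (illustrative, not part of the library): passing NULL starts
   lets Create generate the local partitioning, per the comment above. */
static hypre_ParCSRMatrix *
example_create_square(MPI_Comm comm, HYPRE_BigInt n)
{
   hypre_ParCSRMatrix *A =
      hypre_ParCSRMatrixCreate(comm, n, n, NULL, NULL, 0, 0, 0);
   hypre_ParCSRMatrixInitialize(A);
   return A;
}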
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix )
{
if (matrix)
{
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix);
if ( hypre_ParCSRMatrixOwnsData(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix));
if ( hypre_ParCSRMatrixDiagT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix));
}
if ( hypre_ParCSRMatrixOffdT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix));
}
if (hypre_ParCSRMatrixColMapOffd(matrix))
{
hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST);
}
if (hypre_ParCSRMatrixDeviceColMapOffd(matrix))
{
hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE);
}
if (hypre_ParCSRMatrixCommPkg(matrix))
{
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix));
}
if (hypre_ParCSRMatrixCommPkgT(matrix))
{
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix));
}
}
if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST);
}
if ( hypre_ParCSRMatrixOwnsColStarts(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST);
}
/* RL: this is actually not correct since the memory_location may have been changed after allocation
* put them in containers TODO */
hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location);
hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location);
if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) )
{
hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix));
}
if ( hypre_ParCSRMatrixProcOrdering(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST);
}
hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST);
if (matrix->bdiaginv_comm_pkg)
{
hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg);
}
#if defined(HYPRE_USING_CUDA)
hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE);
hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE);
#endif
hypre_TFree(matrix, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location);
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location);
hypre_ParCSRMatrixColMapOffd(matrix) =
hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)),
HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix));
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixClone
* Creates and returns a new copy S of the argument A
* The following variables are not copied because they will be constructed
* later if needed: CommPkg, CommPkgT, rowindices, rowvalues
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *S;
S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)),
hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) );
/* !!! S does not own Row/Col-Starts */
hypre_ParCSRMatrixSetRowStartsOwner(S, 0);
hypre_ParCSRMatrixSetColStartsOwner(S, 0);
hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
hypre_ParCSRMatrixInitialize_v2(S, memory_location);
hypre_ParCSRMatrixCopy(A, S, copy_data);
return S;
}
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data)
{
return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A));
}
HYPRE_Int
hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location)
{
if (!A)
{
return hypre_error_flag;
}
HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A);
if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location) )
{
hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A));
hypre_ParCSRMatrixDiag(A) = A_diag;
hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A));
hypre_ParCSRMatrixOffd(A) = A_offd;
hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location);
hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location);
}
else
{
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location;
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location;
}
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format )
{
MPI_Comm comm;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
/* TODO in HYPRE_DEBUG ? */
hypre_CSRMatrixCheckSetNumNonzeros(diag);
hypre_CSRMatrixCheckSetNumNonzeros(offd);
if (format[0] == 'I')
{
HYPRE_BigInt total_num_nonzeros;
HYPRE_BigInt local_num_nonzeros;
local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) +
hypre_CSRMatrixNumNonzeros(offd) );
hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
}
else if (format[0] == 'D')
{
HYPRE_Real total_num_nonzeros;
HYPRE_Real local_num_nonzeros;
local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) +
hypre_CSRMatrixNumNonzeros(offd) );
hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1,
HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
}
else
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetNumNonzeros
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int");
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDNumNonzeros
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double");
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_data )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsData(matrix) = owns_data;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetRowStartsOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_row_starts )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetColStartsOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_col_starts )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRead
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm,
const char *file_name )
{
hypre_ParCSRMatrix *matrix;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_Int my_id, i, num_procs;
char new_file_d[80], new_file_o[80], new_file_info[80];
HYPRE_BigInt global_num_rows, global_num_cols;
HYPRE_Int num_cols_offd;
HYPRE_Int local_num_rows;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_BigInt *col_map_offd;
FILE *fp;
HYPRE_Int equal = 1;
HYPRE_BigInt row_s, row_e, col_s, col_e;
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
fp = fopen(new_file_info, "r");
hypre_fscanf(fp, "%b", &global_num_rows);
hypre_fscanf(fp, "%b", &global_num_cols);
hypre_fscanf(fp, "%d", &num_cols_offd);
/* the bgl input file should contain the EXACT range for the local processor */
hypre_fscanf(fp, "%b %b %b %b", &row_s, &row_e, &col_s, &col_e);
row_starts[0] = row_s;
row_starts[1] = row_e;
col_starts[0] = col_s;
col_starts[1] = col_e;
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
{
hypre_fscanf(fp, "%b", &col_map_offd[i]);
}
fclose(fp);
for (i=1; i >= 0; i--)
{
if (row_starts[i] != col_starts[i])
{
equal = 0;
break;
}
}
if (equal)
{
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
col_starts = row_starts;
}
diag = hypre_CSRMatrixRead(new_file_d);
local_num_rows = hypre_CSRMatrixNumRows(diag);
if (num_cols_offd)
{
offd = hypre_CSRMatrixRead(new_file_o);
}
else
{
offd = hypre_CSRMatrixCreate(local_num_rows,0,0);
hypre_CSRMatrixInitialize(offd);
}
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(matrix) = comm;
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
hypre_ParCSRMatrixColStarts(matrix) = col_starts;
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
if (row_starts == col_starts)
{
hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
}
hypre_ParCSRMatrixDiag(matrix) = diag;
hypre_ParCSRMatrixOffd(matrix) = offd;
if (num_cols_offd)
{
hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
}
else
{
hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
}
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
const char *file_name )
{
MPI_Comm comm;
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt *col_map_offd;
HYPRE_Int my_id, i, num_procs;
char new_file_d[80], new_file_o[80], new_file_info[80];
FILE *fp;
HYPRE_Int num_cols_offd = 0;
HYPRE_BigInt row_s, row_e, col_s, col_e;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
if (hypre_ParCSRMatrixOffd(matrix))
num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d);
if (num_cols_offd != 0)
hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o);
fp = fopen(new_file_info, "w");
hypre_fprintf(fp, "%b\n", global_num_rows);
hypre_fprintf(fp, "%b\n", global_num_cols);
hypre_fprintf(fp, "%d\n", num_cols_offd);
row_s = hypre_ParCSRMatrixFirstRowIndex(matrix);
row_e = hypre_ParCSRMatrixLastRowIndex(matrix);
col_s = hypre_ParCSRMatrixFirstColDiag(matrix);
col_e = hypre_ParCSRMatrixLastColDiag(matrix);
/* add 1 to the ends because this is a starts partition */
hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1);
for (i=0; i < num_cols_offd; i++)
hypre_fprintf(fp, "%b\n", col_map_offd[i]);
fclose(fp);
return hypre_error_flag;
}
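/* For reference, the per-rank INFO file written by Print and parsed by Read
   above ("%s.INFO.%d") has this layout:

      global_num_rows
      global_num_cols
      num_cols_offd
      row_start row_end+1 col_start col_end+1
      col_map_offd[0]
      ...
      col_map_offd[num_cols_offd-1]
*/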
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrintIJ
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix,
const HYPRE_Int base_i,
const HYPRE_Int base_j,
const char *filename )
{
MPI_Comm comm;
HYPRE_BigInt first_row_index;
HYPRE_BigInt first_col_diag;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_rows;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Int myid, num_procs, i, j;
HYPRE_BigInt I, J;
char new_filename[255];
FILE *file;
HYPRE_Int num_nonzeros_offd;
HYPRE_BigInt ilower, iupper, jlower, jupper;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
num_rows = hypre_ParCSRMatrixNumRows(matrix);
row_starts = hypre_ParCSRMatrixRowStarts(matrix);
col_starts = hypre_ParCSRMatrixColStarts(matrix);
hypre_MPI_Comm_rank(comm, &myid);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "w")) == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
return hypre_error_flag;
}
num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
}
ilower = row_starts[0]+(HYPRE_BigInt)base_i;
iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1;
jlower = col_starts[0]+(HYPRE_BigInt)base_j;
jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1;
hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper);
for (i = 0; i < num_rows; i++)
{
I = first_row_index + (HYPRE_BigInt)(i + base_i);
/* print diag columns */
for (j = diag_i[i]; j < diag_i[i+1]; j++)
{
J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j);
if ( diag_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
hypre_creal(diag_data[j]), hypre_cimag(diag_data[j]));
#else
hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]);
#endif
}
else
hypre_fprintf(file, "%b %b\n", I, J);
}
/* print offd columns */
if ( num_nonzeros_offd )
{
for (j = offd_i[i]; j < offd_i[i+1]; j++)
{
J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j;
if ( offd_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
hypre_creal(offd_data[j]), hypre_cimag(offd_data[j]));
#else
hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]);
#endif
}
else
hypre_fprintf(file, "%b %b\n", I, J );
}
}
}
fclose(file);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixReadIJ
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixReadIJ( MPI_Comm comm,
const char *filename,
HYPRE_Int *base_i_ptr,
HYPRE_Int *base_j_ptr,
hypre_ParCSRMatrix **matrix_ptr)
{
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt first_row_index;
HYPRE_BigInt first_col_diag;
HYPRE_BigInt last_col_diag;
hypre_ParCSRMatrix *matrix;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_BigInt *col_map_offd;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Int num_rows;
HYPRE_BigInt big_base_i, big_base_j;
HYPRE_Int base_i, base_j;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_BigInt *aux_offd_j;
HYPRE_BigInt I, J;
HYPRE_Int myid, num_procs, i, i2, j;
char new_filename[255];
FILE *file;
HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd;
HYPRE_Int equal, i_col, num_cols;
HYPRE_Int diag_cnt, offd_cnt, row_cnt;
HYPRE_Complex data;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &myid);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "r")) == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
return hypre_error_flag;
}
hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols);
hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd);
hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd);
row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= num_procs; i++)
hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]);
big_base_i = row_starts[0];
big_base_j = col_starts[0];
base_i = (HYPRE_Int)row_starts[0];
base_j = (HYPRE_Int)col_starts[0];
equal = 1;
for (i = 0; i <= num_procs; i++)
{
row_starts[i] -= big_base_i;
col_starts[i] -= big_base_j;
if (row_starts[i] != col_starts[i]) equal = 0;
}
if (equal)
{
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
col_starts = row_starts;
}
matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, num_cols_offd,
num_nonzeros_diag, num_nonzeros_offd);
hypre_ParCSRMatrixInitialize(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
}
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1;
diag_cnt = 0;
offd_cnt = 0;
row_cnt = 0;
for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++)
{
/* read values */
hypre_fscanf(file, "%b %b %le", &I, &J, &data);
i2 = (HYPRE_Int)(I-big_base_i-first_row_index);
J -= big_base_j;
if (i2 > row_cnt)
{
diag_i[i2] = diag_cnt;
offd_i[i2] = offd_cnt;
row_cnt++;
}
if (J < first_col_diag || J > last_col_diag)
{
tmp_j[offd_cnt] = J;
offd_data[offd_cnt++] = data;
}
else
{
diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag);
diag_data[diag_cnt++] = data;
}
}
diag_i[num_rows] = diag_cnt;
offd_i[num_rows] = offd_cnt;
fclose(file);
/* generate col_map_offd */
if (num_nonzeros_offd)
{
aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_nonzeros_offd; i++)
aux_offd_j[i] = tmp_j[i]; /* tmp_j holds the global column indices read above;
                             offd_j is not filled until the binary search below */
hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
col_map_offd[0] = aux_offd_j[0];
offd_cnt = 0;
for (i=1; i < num_nonzeros_offd; i++)
{
if (aux_offd_j[i] > col_map_offd[offd_cnt])
col_map_offd[++offd_cnt] = aux_offd_j[i];
}
for (i=0; i < num_nonzeros_offd; i++)
{
offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd);
}
hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
}
/* move diagonal element in first position in each row */
for (i=0; i < num_rows; i++)
{
i_col = diag_i[i];
for (j=i_col; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
diag_j[j] = diag_j[i_col];
data = diag_data[j];
diag_data[j] = diag_data[i_col];
diag_data[i_col] = data;
diag_j[i_col] = i;
break;
}
}
}
*base_i_ptr = base_i;
*base_j_ptr = base_j;
*matrix_ptr = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetLocalRange
* returns the row numbers of the rows stored on this processor.
* "End" is actually the row number of the last row on this processor.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix,
HYPRE_BigInt *row_start,
HYPRE_BigInt *row_end,
HYPRE_BigInt *col_start,
HYPRE_BigInt *col_end )
{
HYPRE_Int my_id;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id );
*row_start = hypre_ParCSRMatrixFirstRowIndex(matrix);
*row_end = hypre_ParCSRMatrixLastRowIndex(matrix);
*col_start = hypre_ParCSRMatrixFirstColDiag(matrix);
*col_end = hypre_ParCSRMatrixLastColDiag(matrix);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetRow
* Returns global column indices and/or values for a given row in the global
* matrix. Global row number is used, but the row must be stored locally or
* an error is returned. This implementation copies from the two matrices that
* store the local data, storing them in the hypre_ParCSRMatrix structure.
 * Only a single row can be accessed via this function at any one time; the
 * corresponding RestoreRow function must be called to avoid leaking memory
 * and to be able to look at another row.
* Either one of col_ind and values can be left null, and those values will
* not be returned.
* All indices are returned in 0-based indexing, no matter what is used under
* the hood. EXCEPTION: currently this only works if the local CSR matrices
* use 0-based indexing.
* This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
* matrix code, adjusted for our data and software structures.
* AJC 4/99.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
HYPRE_Int my_id;
HYPRE_BigInt row_start, row_end;
hypre_CSRMatrix *Aa;
hypre_CSRMatrix *Ba;
if (!mat)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
if (hypre_ParCSRMatrixGetrowactive(mat))
{
return(-1);
}
hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );
hypre_ParCSRMatrixGetrowactive(mat) = 1;
row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
if (row < row_start || row >= row_end)
{
return(-1);
}
/* if buffer is not allocated and some information is requested,
allocate buffer */
if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
{
/*
allocate enough space to hold information from the longest row.
*/
HYPRE_Int max = 1,tmp;
HYPRE_Int i;
HYPRE_Int m = row_end - row_start;
for ( i = 0; i < m; i++ )
{
tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] +
hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i];
if (max < tmp)
{
max = tmp;
}
}
hypre_ParCSRMatrixRowvalues(mat) =
(HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat));
hypre_ParCSRMatrixRowindices(mat) =
(HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat));
}
/* Copy from dual sequential matrices into buffer */
{
HYPRE_Complex *vworkA, *vworkB, *v_p;
HYPRE_Int i, *cworkA, *cworkB;
HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat);
HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start);
HYPRE_BigInt *cmap, *idx_p;
nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow];
cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow];
cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
nztot = nzA + nzB;
cmap = hypre_ParCSRMatrixColMapOffd(mat);
if (values || col_ind)
{
if (nztot)
{
/* Sort by increasing column numbers, assuming A and B already sorted */
HYPRE_Int imark = -1;
if (values)
{
*values = v_p = hypre_ParCSRMatrixRowvalues(mat);
for ( i = 0; i < nzB; i++ )
{
if (cmap[cworkB[i]] < cstart)
{
v_p[i] = vworkB[i];
}
else
{
break;
}
}
imark = i;
for ( i = 0; i < nzA; i++ )
{
v_p[imark+i] = vworkA[i];
}
for ( i = imark; i < nzB; i++ )
{
v_p[nzA+i] = vworkB[i];
}
}
if (col_ind)
{
*col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
if (imark > -1)
{
for ( i = 0; i < imark; i++ )
{
idx_p[i] = cmap[cworkB[i]];
}
}
else
{
for ( i = 0; i < nzB; i++ )
{
if (cmap[cworkB[i]] < cstart)
{
idx_p[i] = cmap[cworkB[i]];
}
else
{
break;
}
}
imark = i;
}
for ( i = 0; i < nzA; i++ )
{
idx_p[imark+i] = cstart + cworkA[i];
}
for ( i = imark; i < nzB; i++ )
{
idx_p[nzA+i] = cmap[cworkB[i]];
}
}
}
else
{
if (col_ind)
{
*col_ind = 0;
}
if (values)
{
*values = 0;
}
}
}
*size = nztot;
} /* End of copy */
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
#if defined(HYPRE_USING_CUDA)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) );
if (exec == HYPRE_EXEC_DEVICE)
{
return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values);
}
else
#endif
{
return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values);
}
return hypre_error_flag;
}
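/* Usage sketch (illustrative, hypothetical helper): GetRow must be paired with
   RestoreRow, and only one row may be active at a time, per the contract
   documented above. Assumes a real-valued HYPRE_Complex. */
static HYPRE_Int
example_print_row(hypre_ParCSRMatrix *A, HYPRE_BigInt row)
{
   HYPRE_Int      size, k;
   HYPRE_BigInt  *cols;
   HYPRE_Complex *vals;
   hypre_ParCSRMatrixGetRow(A, row, &size, &cols, &vals);
   for (k = 0; k < size; k++)
   {
      hypre_printf("row %b: col %b val %e\n", row, cols[k], (double) vals[k]);
   }
   /* releases the row buffer so another row can be fetched */
   return hypre_ParCSRMatrixRestoreRow(A, row, &size, &cols, &vals);
}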
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRestoreRow
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
if (!hypre_ParCSRMatrixGetrowactive(matrix))
{
hypre_error(HYPRE_ERROR_GENERIC);
return hypre_error_flag;
}
hypre_ParCSRMatrixGetrowactive(matrix) = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixToParCSRMatrix:
*
* Generates a ParCSRMatrix distributed across the processors in comm
 * from a CSRMatrix on proc 0.
*
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm,
hypre_CSRMatrix *A,
HYPRE_BigInt *global_row_starts,
HYPRE_BigInt *global_col_starts )
{
hypre_ParCSRMatrix *parcsr_A;
HYPRE_BigInt *global_data;
HYPRE_BigInt global_size;
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_Int num_procs, my_id;
HYPRE_Int *num_rows_proc;
HYPRE_Int *num_nonzeros_proc;
HYPRE_BigInt *row_starts = NULL;
HYPRE_BigInt *col_starts = NULL;
hypre_CSRMatrix *local_A;
HYPRE_Complex *A_data;
HYPRE_Int *A_i;
HYPRE_Int *A_j;
hypre_MPI_Request *requests;
hypre_MPI_Status *status, status0;
hypre_MPI_Datatype *csr_matrix_datatypes;
HYPRE_Int free_global_row_starts = 0;
HYPRE_Int free_global_col_starts = 0;
HYPRE_Int total_size;
HYPRE_BigInt first_col_diag;
HYPRE_BigInt last_col_diag;
HYPRE_Int num_rows;
HYPRE_Int num_nonzeros;
HYPRE_Int i, ind;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
total_size = 4;
if (my_id == 0)
{
total_size += 2*(num_procs + 1);
}
global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
global_size = 3;
if (global_row_starts)
{
if (global_col_starts)
{
if (global_col_starts != global_row_starts)
{
/* contains code for what to expect,
if 0: global_row_starts = global_col_starts, only global_row_starts given
if 1: only global_row_starts given, global_col_starts = NULL
if 2: both global_row_starts and global_col_starts given
if 3: only global_col_starts given, global_row_starts = NULL */
global_data[3] = 2;
global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+num_procs+5] = global_col_starts[i];
}
}
else
{
global_data[3] = 0;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
}
}
else
{
global_data[3] = 1;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
}
}
else
{
if (global_col_starts)
{
global_data[3] = 3;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_col_starts[i];
}
}
}
global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A);
global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A);
global_data[2] = global_size;
A_data = hypre_CSRMatrixData(A);
A_i = hypre_CSRMatrixI(A);
A_j = hypre_CSRMatrixJ(A);
}
/* broadcast 4 entries so every rank also receives the partition-layout flag
   stored in global_data[3] and branched on below */
hypre_MPI_Bcast(global_data, 4, HYPRE_MPI_BIG_INT, 0, comm);
global_num_rows = global_data[0];
global_num_cols = global_data[1];
global_size = global_data[2];
if (global_size > 3)
{
HYPRE_Int send_start;
if (global_data[3] == 2)
{
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 4 + (num_procs + 1);
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5 + (num_procs + 1);
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
}
else if ((global_data[3] == 0) || (global_data[3] == 1))
{
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
if (global_data[3] == 0)
{
col_starts = row_starts;
}
}
else
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
}
}
hypre_TFree(global_data, HYPRE_MEMORY_HOST);
// Create ParCSR matrix
parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, 0, 0, 0);
// Allocate memory for building ParCSR matrix
num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
if (!global_row_starts)
{
hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts);
free_global_row_starts = 1;
}
if (!global_col_starts)
{
hypre_GeneratePartitioning(global_num_cols, num_procs, &global_col_starts);
free_global_col_starts = 1;
}
for (i = 0; i < num_procs; i++)
{
num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]);
num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] -
A_i[(HYPRE_Int)global_row_starts[i]];
}
//num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]];
}
hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm);
hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm);
/* RL: this is not correct: (HYPRE_Int) global_num_cols */
local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros);
csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST);
for (i = 1; i < num_procs; i++)
{
ind = A_i[(HYPRE_Int) global_row_starts[i]];
hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i],
num_rows_proc[i],
&A_data[ind],
&A_i[(HYPRE_Int) global_row_starts[i]],
&A_j[ind],
&csr_matrix_datatypes[i]);
hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
&requests[i-1]);
hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
}
hypre_CSRMatrixData(local_A) = A_data;
hypre_CSRMatrixI(local_A) = A_i;
hypre_CSRMatrixJ(local_A) = A_j;
hypre_CSRMatrixOwnsData(local_A) = 0;
hypre_MPI_Waitall(num_procs-1, requests, status);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST);
hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST);
if (free_global_row_starts)
{
hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST);
}
if (free_global_col_starts)
{
hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST);
}
}
else
{
hypre_CSRMatrixInitialize(local_A);
hypre_BuildCSRMatrixMPIDataType(num_nonzeros,
num_rows,
hypre_CSRMatrixData(local_A),
hypre_CSRMatrixI(local_A),
hypre_CSRMatrixJ(local_A),
&csr_matrix_datatypes[0]);
hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0);
hypre_MPI_Type_free(csr_matrix_datatypes);
}
first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A);
last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A);
GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag);
/* set pointers back to NULL before destroying */
if (my_id == 0)
{
hypre_CSRMatrixData(local_A) = NULL;
hypre_CSRMatrixI(local_A) = NULL;
hypre_CSRMatrixJ(local_A) = NULL;
}
hypre_CSRMatrixDestroy(local_A);
hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST);
return parcsr_A;
}
/* RL: XXX this is not a scalable routine, see `marker' therein */
HYPRE_Int
GenerateDiagAndOffd(hypre_CSRMatrix *A,
hypre_ParCSRMatrix *matrix,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt last_col_diag)
{
HYPRE_Int i, j;
HYPRE_Int jo, jd;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *a_data = hypre_CSRMatrixData(A);
HYPRE_Int *a_i = hypre_CSRMatrixI(A);
/*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */
HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
HYPRE_BigInt *col_map_offd;
HYPRE_Complex *diag_data, *offd_data;
HYPRE_Int *diag_i, *offd_i;
HYPRE_Int *diag_j, *offd_j;
HYPRE_Int *marker;
HYPRE_Int num_cols_diag, num_cols_offd;
HYPRE_Int first_elmt = a_i[0];
HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt;
HYPRE_Int counter;
num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1);
num_cols_offd = 0;
HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);
if (num_cols - num_cols_diag)
{
hypre_CSRMatrixInitialize_v2(diag, 0, memory_location);
diag_i = hypre_CSRMatrixI(diag);
hypre_CSRMatrixInitialize_v2(offd, 0, memory_location);
offd_i = hypre_CSRMatrixI(offd);
marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols; i++)
{
marker[i] = 0;
}
jo = 0;
jd = 0;
for (i = 0; i < num_rows; i++)
{
offd_i[i] = jo;
diag_i[i] = jd;
for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
{
if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
{
if (!marker[a_j[j]])
{
marker[a_j[j]] = 1;
num_cols_offd++;
}
jo++;
}
else
{
jd++;
}
}
}
offd_i[num_rows] = jo;
diag_i[num_rows] = jd;
hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
counter = 0;
for (i = 0; i < num_cols; i++)
{
if (marker[i])
{
col_map_offd[counter] = (HYPRE_BigInt) i;
marker[i] = counter;
counter++;
}
}
hypre_CSRMatrixNumNonzeros(diag) = jd;
hypre_CSRMatrixInitialize(diag);
diag_data = hypre_CSRMatrixData(diag);
diag_j = hypre_CSRMatrixJ(diag);
hypre_CSRMatrixNumNonzeros(offd) = jo;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_CSRMatrixInitialize(offd);
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
jo = 0;
jd = 0;
for (i=0; i < num_rows; i++)
{
for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
{
if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag)
{
offd_data[jo] = a_data[j];
offd_j[jo++] = marker[a_j[j]];
}
else
{
diag_data[jd] = a_data[j];
diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag);
}
}
}
hypre_TFree(marker, HYPRE_MEMORY_HOST);
}
else
{
hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
hypre_CSRMatrixInitialize(diag);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
for (i=0; i < num_nonzeros; i++)
{
diag_data[i] = a_data[i];
diag_j[i] = a_j[i];
}
offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows+1; i++)
{
diag_i[i] = a_i[i];
offd_i[i] = 0;
}
hypre_CSRMatrixNumCols(offd) = 0;
hypre_CSRMatrixI(offd) = offd_i;
}
return hypre_error_flag;
}
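/* Illustrative only: the ownership test GenerateDiagAndOffd applies to each
   column index when splitting a local row -- columns inside
   [first_col_diag, last_col_diag] go to diag, everything else to offd. */
static HYPRE_Int
example_is_diag_col(HYPRE_BigInt j, HYPRE_BigInt first_col_diag,
                    HYPRE_BigInt last_col_diag)
{
   return (j >= first_col_diag && j <= last_col_diag);
}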
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
hypre_CSRMatrix *matrix;
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
HYPRE_Int *matrix_i;
HYPRE_BigInt *matrix_j;
HYPRE_Complex *matrix_data;
HYPRE_Int num_nonzeros, i, j;
HYPRE_Int count;
HYPRE_Int size, rest, num_threads, ii;
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix);
num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = memory_location;
hypre_CSRMatrixBigInitialize(matrix);
matrix_i = hypre_CSRMatrixI(matrix);
matrix_j = hypre_CSRMatrixBigJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
num_threads = hypre_NumThreads();
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
HYPRE_Int ns, ne;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
count = diag_i[ns] + offd_i[ns];
for (i = ns; i < ne; i++)
{
matrix_i[i] = count;
for (j=diag_i[i]; j < diag_i[i+1]; j++)
{
matrix_data[count] = diag_data[j];
matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag;
}
for (j=offd_i[i]; j < offd_i[i+1]; j++)
{
matrix_data[count] = offd_data[j];
matrix_j[count++] = col_map_offd[offd_j[j]];
}
}
} /* end parallel region */
matrix_i[num_rows] = num_nonzeros;
return matrix;
}
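/* Worked example of the ns/ne partitioning above: num_rows = 10 and
   num_threads = 4 give size = 2 and rest = 2, so the row ranges are
   [0,3), [3,6), [6,8), [8,10) -- the first `rest` threads take one extra
   row each. */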
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixToCSRMatrixAll:
* generates a CSRMatrix from a ParCSRMatrix on all processors that have
* parts of the ParCSRMatrix
* Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
hypre_CSRMatrix *matrix;
hypre_CSRMatrix *local_matrix;
HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix);
HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix);
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Complex *matrix_data;
HYPRE_Int *local_matrix_i;
HYPRE_Int *local_matrix_j;
HYPRE_Complex *local_matrix_data;
HYPRE_Int i, j;
HYPRE_Int local_num_rows;
HYPRE_Int local_num_nonzeros;
HYPRE_Int num_nonzeros;
HYPRE_Int num_data;
HYPRE_Int num_requests;
HYPRE_Int vec_len, offset;
HYPRE_Int start_index;
HYPRE_Int proc_id;
HYPRE_Int num_procs, my_id;
HYPRE_Int num_types;
HYPRE_Int *used_procs;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
HYPRE_Int *new_vec_starts;
HYPRE_Int num_contacts;
HYPRE_Int contact_proc_list[1];
HYPRE_Int contact_send_buf[1];
HYPRE_Int contact_send_buf_starts[2];
HYPRE_Int max_response_size;
HYPRE_Int *response_recv_buf=NULL;
HYPRE_Int *response_recv_buf_starts = NULL;
hypre_DataExchangeResponse response_obj;
hypre_ProcListElements send_proc_obj;
HYPRE_Int *send_info = NULL;
hypre_MPI_Status status1;
HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
HYPRE_Int start;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) -
hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1);
local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */
hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
/* determine which procs have matrix data and store their ids in used_procs;
   we need a data exchange for this: every rank that owns rows contacts
   processor 0 with the endpoint of its local range */
if (local_num_rows > 0)
{
num_contacts = 1;
contact_proc_list[0] = 0;
contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix);
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 1;
}
else
{
num_contacts = 0;
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 0;
}
/*build the response object*/
/*send_proc_obj will be for saving info from contacts */
send_proc_obj.length = 0;
send_proc_obj.storage_length = 10;
send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = 10;
send_proc_obj.elements =
hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
max_response_size = 0; /* each response is null */
response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
response_obj.data1 = NULL;
response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
hypre_DataExchangeList(num_contacts,
contact_proc_list, contact_send_buf,
contact_send_buf_starts, sizeof(HYPRE_Int),
sizeof(HYPRE_Int), &response_obj,
max_response_size, 1,
comm, (void**) &response_recv_buf,
&response_recv_buf_starts);
/* now processor 0 should have a list of ranges for processors that have rows -
these are in send_proc_obj - it needs to create the new list of processors
and an array of vec starts - and send them to the ranks that own rows */
if (my_id)
{
if (local_num_rows)
{
/* look for a message from processor 0 */
hypre_MPI_Probe(0, tag1, comm, &status1);
hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
/* now unpack */
num_types = send_info[0];
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
for (i=1; i<= num_types; i++)
{
used_procs[i-1] = send_info[i];
}
for (i=num_types+1; i< count; i++)
{
new_vec_starts[i-num_types-1] = send_info[i] ;
}
}
else /* clean up and exit */
{
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
return NULL;
}
}
else /* my_id ==0 */
{
num_types = send_proc_obj.length;
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
new_vec_starts[0] = 0;
for (i=0; i< num_types; i++)
{
used_procs[i] = send_proc_obj.id[i];
new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
}
hypre_qsort0(used_procs, 0, num_types-1);
hypre_qsort0(new_vec_starts, 0, num_types);
/*now we need to put into an array to send */
count = 2*num_types+2;
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
send_info[0] = num_types;
for (i=1; i<= num_types; i++)
{
send_info[i] = used_procs[i-1]; /* both are HYPRE_Int; the old HYPRE_BigInt cast was a misleading no-op */
}
for (i=num_types+1; i< count; i++)
{
send_info[i] = new_vec_starts[i-num_types-1];
}
requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
/* don't send to myself - these are sorted so my id would be first*/
start = 0;
if (num_types && used_procs[0] == 0)
{
start = 1;
}
for (i=start; i < num_types; i++)
{
hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1,
comm, &requests[i-start]);
}
hypre_MPI_Waitall(num_types-start, requests, status);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
/* clean up */
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_info, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
/* now proc 0 can exit if it has no rows */
if (!local_num_rows)
{
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
return NULL;
}
/* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */
/* this matrix should be rather small */
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
num_requests = 4*num_types;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* exchange contents of local_matrix_i - here we are sending to ourself also*/
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]);
hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from the received data: each received block holds a
rank-local cumulative count, so add the running offset of all previous
blocks to turn matrix_i into one global CSR row pointer */
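/* Worked example (illustrative): with two contributing ranks whose local row
pointers are [0,2,5] and [0,3,4], the received interior entries give
matrix_i = [0,2,5,3,4]; adding the running offsets yields the global row
pointer [0,2,5,8,9], so num_nonzeros = 9. */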
offset = matrix_i[new_vec_starts[1]];
for (i=1; i < num_types; i++)
{
for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++)
matrix_i[j+1] += offset;
offset = matrix_i[new_vec_starts[i+1]];
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
/* generate datatypes for further data exchange and exchange remaining
data, i.e. column info and actual data */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]];
num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index;
hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
local_num_nonzeros = local_matrix_i[local_num_rows];
for (i=0; i < num_types; i++)
{
hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
if (num_requests)
{
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
}
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCopy,
* copies A to B,
* if copy_data = 0, only the structure of A is copied to B
* the routine does not check whether the dimensions of A and B are compatible
*--------------------------------------------------------------------------*/
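/* Usage sketch (illustrative; B must already be created and initialized with
 * the same dimensions and distribution as A):
 *    hypre_ParCSRMatrixCopy(A, B, 1);   // copy structure and values
 *    hypre_ParCSRMatrixCopy(A, B, 0);   // copy structure only
 */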
HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B,
HYPRE_Int copy_data )
{
hypre_CSRMatrix *A_diag;
hypre_CSRMatrix *A_offd;
HYPRE_BigInt *col_map_offd_A;
hypre_CSRMatrix *B_diag;
hypre_CSRMatrix *B_offd;
HYPRE_BigInt *col_map_offd_B;
HYPRE_Int num_cols_offd_A;
HYPRE_Int num_cols_offd_B;
if (!A)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!B)
{
hypre_error_in_arg(2); /* B is the second argument */
return hypre_error_flag;
}
A_diag = hypre_ParCSRMatrixDiag(A);
A_offd = hypre_ParCSRMatrixOffd(A);
B_diag = hypre_ParCSRMatrixDiag(B);
B_offd = hypre_ParCSRMatrixOffd(B);
num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_assert(num_cols_offd_A == num_cols_offd_B);
col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
hypre_CSRMatrixCopy(A_diag, B_diag, copy_data);
hypre_CSRMatrixCopy(A_offd, B_offd, copy_data);
/* should not happen if B has been initialized */
if (num_cols_offd_B && col_map_offd_B == NULL)
{
col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
}
hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseParToCSRMatrix
* Fill-response function used by the data exchange to determine the
* processors that will send data
*--------------------------------------------------------------------*/
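/* Added note: this is a hypre_DataExchangeList fill-response callback. It
 * records the contacting processor id and appends the received range
 * endpoints to send_proc_obj (growing its storage as needed), and returns an
 * empty response message to the contacting rank. */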
HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
HYPRE_Int myid;
HYPRE_Int i, index, count, elength;
HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;
hypre_MPI_Comm_rank(comm, &myid );
/*check to see if we need to allocate more space in send_proc_obj for ids*/
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
send_proc_obj->vec_starts =
hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
/*initialize*/
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/
/*send proc*/
send_proc_obj->id[count] = contact_proc;
/*do we need more storage for the elements?*/
if (send_proc_obj->element_storage_length < index + contact_size)
{
elength = hypre_max(contact_size, 10);
elength += index;
send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = elength;
}
/*populate send_proc_obj*/
for (i=0; i< contact_size; i++)
{
send_proc_obj->elements[index++] = recv_contact_buf[i];
}
send_proc_obj->vec_starts[count+1] = index;
send_proc_obj->length++;
/*output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixUnion
* Creates and returns a new matrix whose elements are the union of A and B.
* Data is not copied, only structural information is created.
* A and B must have the same communicator, numbers and distributions of rows
* and columns (they can differ in which row-column pairs are nonzero, thus
* in which columns are in a offd block)
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A,
hypre_ParCSRMatrix * B )
{
hypre_ParCSRMatrix * C;
HYPRE_BigInt * col_map_offd_C = NULL;
HYPRE_Int num_procs, my_id, p;
MPI_Comm comm = hypre_ParCSRMatrixComm( A );
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A );
hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A );
hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A );
hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A );
hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B )
== hypre_ParCSRMatrixFirstRowIndex( A ) );
hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A );
hypre_ParCSRMatrixOwnsRowStarts( C ) = 0;
hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A );
hypre_ParCSRMatrixOwnsColStarts( C ) = 0;
for ( p=0; p<=num_procs; ++p )
hypre_assert( hypre_ParCSRMatrixColStarts(A)[p]
== hypre_ParCSRMatrixColStarts(B)[p] );
hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A );
hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A );
hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A );
hypre_ParCSRMatrixDiag( C ) =
hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B),
0, 0, 0 );
hypre_ParCSRMatrixOffd( C ) =
hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B),
hypre_ParCSRMatrixColMapOffd(A),
hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C );
hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C;
hypre_ParCSRMatrixCommPkg( C ) = NULL;
hypre_ParCSRMatrixCommPkgT( C ) = NULL;
hypre_ParCSRMatrixOwnsData( C ) = 1;
/* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce.
I suspect, but don't know, that other parts of hypre do not assume that
the correct values have been set.
hypre_ParCSRMatrixSetNumNonzeros( C );
hypre_ParCSRMatrixSetDNumNonzeros( C );*/
hypre_ParCSRMatrixNumNonzeros( C ) = 0;
hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0;
hypre_ParCSRMatrixRowindices( C ) = NULL;
hypre_ParCSRMatrixRowvalues( C ) = NULL;
hypre_ParCSRMatrixGetrowactive( C ) = 0;
return C;
}
/* Drop the entries that are off the diagonal and small relative to the row
 * norm: keep a_ij if i == j or |a_ij| >= tol * ||row i||, where the norm is
 * chosen by `type` (1: 1-norm, 2: 2-norm, -1: infinity norm). */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int type)
{
HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *marker_offd = NULL;
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int my_id, num_procs;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (tol <= 0.0)
{
return hypre_error_flag;
}
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0;
for (i = 0; i < nrow_local; i++)
{
/* compute row norm */
HYPRE_Real row_nrm = 0.0;
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
HYPRE_Complex v = A_diag_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Complex v = A_offd_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
}
if (type == 2)
{
row_nrm = sqrt(row_nrm);
}
/* drop small entries based on tol and row norm */
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
HYPRE_Int col = A_diag_j[j];
HYPRE_Complex val = A_diag_a[j];
if (i == col || fabs(val) >= tol * row_nrm)
{
A_diag_j[nnz_diag] = col;
A_diag_a[nnz_diag] = val;
nnz_diag ++;
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Int col = A_offd_j[j];
HYPRE_Complex val = A_offd_a[j];
/* in normal cases: diagonal entry should not
* appear in A_offd (but this can still be possible) */
if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm)
{
if (0 == marker_offd[col])
{
marker_offd[col] = 1;
}
A_offd_j[nnz_offd] = col;
A_offd_a[nnz_offd] = val;
nnz_offd ++;
}
}
}
A_diag_i_i = A_diag_i[i+1];
A_offd_i_i = A_offd_i[i+1];
A_diag_i[i+1] = nnz_diag;
A_offd_i[i+1] = nnz_offd;
}
hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag;
hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd;
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i])
{
col_map_offd_A[k] = col_map_offd_A[i];
marker_offd[i] = k++;
}
}
/* num_cols_A_offd = k; */
hypre_CSRMatrixNumCols(A_offd) = k;
for (i = 0; i < nnz_offd; i++)
{
A_offd_j[i] = marker_offd[A_offd_j[i]];
}
if ( hypre_ParCSRMatrixCommPkg(A) )
{
hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) );
}
hypre_MatvecCommPkgCreate(A);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Perform dual truncation of ParCSR matrix.
* This code is adapted from original BoomerAMGInterpTruncate()
* A: parCSR matrix to be modified
* tol: relative tolerance or truncation factor for dropping small terms
* max_row_elmts: maximum number of (largest) nonzero elements to keep.
* rescale: Boolean on whether or not to scale the resulting matrix. Each
* surviving row is multiplied by
* sum(nonzero values before dropping) / sum(nonzero values after dropping),
* so that applying the truncated matrix to a constant vector gives the same
* result as the original matrix.
* nrm_type: type of norm used for dropping with tol.
* -- 0 = infinity-norm
* -- 1 = 1-norm
* -- 2 = 2-norm
*/
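/* Usage sketch (illustrative values): drop entries below 0.01 * ||row||_inf,
 * keep at most the 5 largest-magnitude entries per row, and rescale the
 * survivors so each row sum is preserved:
 *    hypre_ParCSRMatrixTruncate(A, 0.01, 5, 1, 0);
 */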
HYPRE_Int
hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int max_row_elmts,
HYPRE_Int rescale,
HYPRE_Int nrm_type)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_j_new;
HYPRE_Real *A_diag_data_new;
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j_new;
HYPRE_Real *A_offd_data_new;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int i, j, start_j;
HYPRE_Int ierr = 0;
HYPRE_Int next_open;
HYPRE_Int now_checking;
HYPRE_Int num_lost;
HYPRE_Int num_lost_global=0;
HYPRE_Int next_open_offd;
HYPRE_Int now_checking_offd;
HYPRE_Int num_lost_offd;
HYPRE_Int num_lost_global_offd;
HYPRE_Int A_diag_size;
HYPRE_Int A_offd_size;
HYPRE_Int num_elmts;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Real row_nrm;
HYPRE_Real drop_coeff;
HYPRE_Real row_sum;
HYPRE_Real scale;
HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag);
HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd);
/* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
* number of dropped entries over thread i's row range. Cum_lost_per_thread
* will temporarily store the cumulative number of dropped entries up to
* each thread. */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * cum_lost_per_thread;
HYPRE_Int * num_lost_per_thread;
HYPRE_Int * num_lost_offd_per_thread;
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_threads[0]; i++)
{
num_lost_per_thread[i] = 0;
num_lost_offd_per_thread[i] = 0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
{
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* Compute each thread's range of rows to truncate and compress. Note,
* that i, j and data are all compressed as entries are dropped, but
* that the compression only occurs locally over each thread's row
* range. A_diag_i is only made globally consistent at the end of this
* routine. During the dropping phases, A_diag_i[stop] will point to
* the start of the next thread's row range. */
/* my row range */
start = (n_fine / num_threads) * my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine / num_threads) * (my_thread_num + 1);
}
/*
* Truncate based on truncation tolerance
*/
if (tol > 0)
{
num_lost = 0;
num_lost_offd = 0;
next_open = A_diag_i[start];
now_checking = A_diag_i[start];
next_open_offd = A_offd_i[start];
now_checking_offd = A_offd_i[start];
for (i = start; i < stop; i++)
{
row_nrm = 0;
/* compute norm for dropping small terms */
if (nrm_type == 0)
{
/* infty-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
row_nrm = (row_nrm < fabs(A_diag_data[j])) ?
fabs(A_diag_data[j]) : row_nrm;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_nrm = (row_nrm < fabs(A_offd_data[j])) ?
fabs(A_offd_data[j]) : row_nrm;
}
}
if (nrm_type == 1)
{
/* 1-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
row_nrm += fabs(A_diag_data[j]);
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_nrm += fabs(A_offd_data[j]);
}
}
if (nrm_type == 2)
{
/* 2-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
HYPRE_Complex v = A_diag_data[j];
row_nrm += v*v;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
HYPRE_Complex v = A_offd_data[j];
row_nrm += v*v;
}
row_nrm = sqrt(row_nrm);
}
drop_coeff = tol * row_nrm;
start_j = A_diag_i[i];
if (num_lost)
{
A_diag_i[i] -= num_lost;
}
row_sum = 0;
scale = 0;
for (j = start_j; j < A_diag_i[i+1]; j++)
{
row_sum += A_diag_data[now_checking];
if (fabs(A_diag_data[now_checking]) < drop_coeff)
{
num_lost++;
now_checking++;
}
else
{
scale += A_diag_data[now_checking];
A_diag_data[next_open] = A_diag_data[now_checking];
A_diag_j[next_open] = A_diag_j[now_checking];
now_checking++;
next_open++;
}
}
start_j = A_offd_i[i];
if (num_lost_offd)
{
A_offd_i[i] -= num_lost_offd;
}
for (j = start_j; j < A_offd_i[i+1]; j++)
{
row_sum += A_offd_data[now_checking_offd];
if (fabs(A_offd_data[now_checking_offd]) < drop_coeff)
{
num_lost_offd++;
now_checking_offd++;
}
else
{
scale += A_offd_data[now_checking_offd];
A_offd_data[next_open_offd] = A_offd_data[now_checking_offd];
A_offd_j[next_open_offd] = A_offd_j[now_checking_offd];
now_checking_offd++;
next_open_offd++;
}
}
/* scale row of A */
if (rescale && scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++)
{
A_diag_data[j] *= scale;
}
for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++)
{
A_offd_data[j] *= scale;
}
}
}
} /* end loop over this thread's rows (i = start; i < stop) */
/* store number of dropped elements and number of threads */
if (my_thread_num == 0)
{
max_num_threads[0] = num_threads;
}
num_lost_per_thread[my_thread_num] = num_lost;
num_lost_offd_per_thread[my_thread_num] = num_lost_offd;
} /* end if (tol > 0) */
/*
* Truncate based on capping the nnz per row
*
*/
if (max_row_elmts > 0)
{
HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd;
HYPRE_Int *A_aux_j;
HYPRE_Real *A_aux_data;
/* find maximum row length locally over this row range */
A_mxnum = 0;
for (i=start; i<stop; i++)
{
/* Note A_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = A_diag_i[i+1];
last_index_offd = A_offd_i[i+1];
if (i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
if (cnt1 > A_mxnum)
{
A_mxnum = cnt1;
}
}
/* Some rows exceed max_row_elmts, and require truncation. Essentially,
* each thread truncates and compresses its range of rows locally. */
if (A_mxnum > max_row_elmts)
{
num_lost = 0;
num_lost_offd = 0;
/* two temporary arrays to hold row i for temporary operations */
A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST);
A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST);
cnt_diag = A_diag_i[start];
cnt_offd = A_offd_i[start];
for (i = start; i < stop; i++)
{
/* Note A_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = A_diag_i[i+1];
last_index_offd = A_offd_i[i+1];
if (i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
row_sum = 0;
num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
if (max_row_elmts < num_elmts)
{
/* copy both diagonal and off-diag parts of row i to _aux_ arrays */
cnt = 0;
for (j = A_diag_i[i]; j < last_index; j++)
{
A_aux_j[cnt] = A_diag_j[j];
A_aux_data[cnt++] = A_diag_data[j];
row_sum += A_diag_data[j];
}
num_lost += cnt;
cnt1 = cnt;
for (j = A_offd_i[i]; j < last_index_offd; j++)
{
A_aux_j[cnt] = A_offd_j[j]+num_cols;
A_aux_data[cnt++] = A_offd_data[j];
row_sum += A_offd_data[j];
}
num_lost_offd += cnt-cnt1;
/* sort data */
hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1);
scale = 0;
if (i > start)
{
A_diag_i[i] = cnt_diag;
A_offd_i[i] = cnt_offd;
}
for (j = 0; j < max_row_elmts; j++)
{
scale += A_aux_data[j];
if (A_aux_j[j] < num_cols)
{
A_diag_j[cnt_diag] = A_aux_j[j];
A_diag_data[cnt_diag++] = A_aux_data[j];
}
else
{
A_offd_j[cnt_offd] = A_aux_j[j]-num_cols;
A_offd_data[cnt_offd++] = A_aux_data[j];
}
}
num_lost -= cnt_diag-A_diag_i[i];
num_lost_offd -= cnt_offd-A_offd_i[i];
/* scale row of A */
if (rescale && (scale != 0.))
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = A_diag_i[i]; j < cnt_diag; j++)
{
A_diag_data[j] *= scale;
}
for (j = A_offd_i[i]; j < cnt_offd; j++)
{
A_offd_data[j] *= scale;
}
}
}
} /* end if (max_row_elmts < num_elmts) */
else
{
/* nothing dropped from this row, but still have to shift entries back
* by the number dropped so far */
if (A_diag_i[i] != cnt_diag)
{
start_j = A_diag_i[i];
A_diag_i[i] = cnt_diag;
for (j = start_j; j < last_index; j++)
{
A_diag_j[cnt_diag] = A_diag_j[j];
A_diag_data[cnt_diag++] = A_diag_data[j];
}
}
else
{
cnt_diag += last_index-A_diag_i[i];
}
if (A_offd_i[i] != cnt_offd)
{
start_j = A_offd_i[i];
A_offd_i[i] = cnt_offd;
for (j = start_j; j < last_index_offd; j++)
{
A_offd_j[cnt_offd] = A_offd_j[j];
A_offd_data[cnt_offd++] = A_offd_data[j];
}
}
else
{
cnt_offd += last_index_offd-A_offd_i[i];
}
}
} /* end for (i = start; i < stop; i++) */
num_lost_per_thread[my_thread_num] += num_lost;
num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST);
} /* end if (A_mxnum > max_row_elmts) */
} /* end if (max_row_elmts > 0) */
/* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
num_lost_global = 0;
num_lost_global_offd = 0;
for (i = 0; i < max_num_threads[0]; i++)
{
num_lost_global += num_lost_per_thread[i];
num_lost_global_offd += num_lost_offd_per_thread[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*
* Synchronize and create new diag data structures
*/
if (num_lost_global)
{
/* Each thread has its own locally compressed CSR matrix from rows start
* to stop. Now, we have to copy each thread's chunk into the new
* process-wide CSR data structures
*
* First, we compute the new process-wide number of nonzeros (i.e.,
* A_diag_size), and compute cum_lost_per_thread[k] so that this
* entry holds the cumulative sum of entries dropped up to and
* including thread k. */
if (my_thread_num == 0)
{
A_diag_size = A_diag_i[n_fine];
for (i = 0; i < max_num_threads[0]; i++)
{
A_diag_size -= num_lost_per_thread[i];
if (i > 0)
{
cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1];
}
else
{
cum_lost_per_thread[i] = num_lost_per_thread[i];
}
}
A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag);
A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if (my_thread_num == 0)
{
next_open = 0;
}
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
{
A_diag_j_new[next_open] = A_diag_j[i];
A_diag_data_new[next_open] = A_diag_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update A_diag_i with number of dropped entries by all lower ranked
* threads */
if (my_thread_num > 0)
{
for (i=start; i<stop; i++)
{
A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if (my_thread_num == 0)
{
/* Set last entry */
A_diag_i[n_fine] = A_diag_size ;
hypre_TFree(A_diag_j, memory_location_diag);
hypre_TFree(A_diag_data, memory_location_diag);
hypre_CSRMatrixJ(A_diag) = A_diag_j_new;
hypre_CSRMatrixData(A_diag) = A_diag_data_new;
hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size;
}
}
/*
* Synchronize and create new offd data structures
*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (num_lost_global_offd)
{
/* Repeat process for off-diagonal */
if (my_thread_num == 0)
{
A_offd_size = A_offd_i[n_fine];
for (i = 0; i < max_num_threads[0]; i++)
{
A_offd_size -= num_lost_offd_per_thread[i];
if (i > 0)
{
cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1];
}
else
{
cum_lost_per_thread[i] = num_lost_offd_per_thread[i];
}
}
A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd);
A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if (my_thread_num == 0)
{
next_open = 0;
}
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
{
A_offd_j_new[next_open] = A_offd_j[i];
A_offd_data_new[next_open] = A_offd_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update A_offd_i with number of dropped entries by all lower ranked
* threads */
if (my_thread_num > 0)
{
for (i=start; i<stop; i++)
{
A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if (my_thread_num == 0)
{
/* Set last entry */
A_offd_i[n_fine] = A_offd_size ;
hypre_TFree(A_offd_j, memory_location_offd);
hypre_TFree(A_offd_data, memory_location_offd);
hypre_CSRMatrixJ(A_offd) = A_offd_j_new;
hypre_CSRMatrixData(A_offd) = A_offd_data_new;
hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size;
}
}
} /* end parallel region */
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif
return ierr;
}
|
ten_tusscher_2004_epi_S2_1.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_1.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
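/* Note (added): exactly one of EPI / ENDO / MCELL must be defined at compile
time, otherwise R_INF, S_INF, TAU_R and TAU_S below are left uninitialized;
since this is the epicardial variant of the model, EPI is presumably defined
by the build. */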
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
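// The gate updates below use the Rush-Larsen scheme: each gate x relaxes
// exponentially toward its steady state, x_new = x_inf - (x_inf - x)*exp(-dt/tau_x),
// which is exact for the linear gate ODE over one step. The voltage update
// further down is plain forward Euler. Note that, despite the name, rDY_
// therefore returns the *updated* state (not time derivatives), and
// solve_model_ode_cpu copies it straight back into sv.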
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
variables.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(void) {
int a = 2, b = 3, c = 4;
/*
Here the variables a = 2, b = 3 and c = 4 have these values on the MASTER
thread. Given that thread 0 (MASTER) also enters the parallel region, why
do a and b still print their initial values 2 and 3 at the end? I understand
that c goes from 4 to 3 because it is shared, but if a = 1 and b = 2 on each
thread, why doesn't the final printf of the MASTER thread's a and b show
1 and 2?
*/
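/*
Answer (added explanation): private(a, b) gives every thread a brand-new,
uninitialized copy of a and b inside the region; the writes a = 1 and b = 2
only touch those copies, which are discarded when the region ends, so the
originals still hold 2 and 3 afterwards. c is shared, so each thread writes
c = 1 + 2 = 3 to the same variable (technically a data race, but every thread
writes the same value), which is why c prints as 3.
*/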
printf("Master thread id: %d\n", omp_get_thread_num());
#pragma omp parallel private(a, b) shared(c)
{
printf("Thread id: %d\n", omp_get_thread_num());
a = 1;
b = 2;
c = a + b;
}
printf("[%d] %d %d %d\n", omp_get_thread_num(), a, b, c);
return 0;
}
|
DECADES_decoupled.h | #pragma once
//#include "DECADES.h"
#include <stdint.h> /* needed for the uint32_t / uint64_t prototypes below */
__attribute__((noinline))
extern "C"
int compute_exclusive_load(int *addr) {
int value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
int compute_exclusive_load_uint8(char *addr) {
int value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
float compute_exclusive_load_float(float *addr) {
float value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
double compute_exclusive_load_double(double *addr) {
double value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
long compute_exclusive_load_long(long *addr) {
long value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
uint64_t compute_exclusive_load_uint64t(uint64_t *addr) {
uint64_t value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
uint32_t compute_exclusive_load_uint32t(uint32_t *addr) {
uint32_t value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store(int *addr, int value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_ptr(int **addr, int *value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_uint8(char *addr,char value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_float(float *addr, float value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_double(double *addr, double value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_long(long *addr, long value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_uint64t(uint64_t *addr, uint64_t value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
void compute_exclusive_store_uint32t(uint32_t *addr, uint32_t value) {
*addr = value;
}
__attribute__((noinline))
extern "C"
int compute_exclusive_fetch_add(int *addr, int value) {
int ret;
#pragma omp atomic capture
{
ret = addr[0];
addr[0] += value;
}
return ret;
}
__attribute__((noinline))
extern "C"
int compute_exclusive_fetch_min(int *addr, int value) {
return dec_atomic_fetch_min(addr, value);
}
__attribute__((noinline))
extern "C"
int compute_exclusive_compare_exchange(int *addr, int val1, int val2) {
return dec_atomic_compare_exchange(addr, val1, val2);
}
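// Note (added): dec_atomic_fetch_min and dec_atomic_compare_exchange are not
// declared in this header; they presumably come from the DECADES runtime
// (see the commented-out "DECADES.h" include above).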
__attribute__((noinline))
extern "C"
void compute_side_store(int *addr, int value) {
*addr = value;
}
//__attribute__((noinline))
//extern "C"
//int compute_side_fetch_min(int *addr, int value) {
// return DECADES_FETCH_MIN(addr, value);/
//}
__attribute__((noinline))
extern "C"
float supply_exclusive_load_float(float *addr) {
float value = *addr;
return value;
}
__attribute__((noinline))
extern "C"
void supply_exclusive_store_float(float *addr, float value) {
*addr = value;
}
|
GB_unop__tgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tgamma_fp64_fp64)
// op(A') function: GB (_unop_tran__tgamma_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tgamma (aij)
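// Note (added): tgamma is the C math-library Gamma function, tgamma(x) = Gamma(x),
// with Gamma(n) = (n-1)! for positive integers, e.g. tgamma(5.0) == 24.0.
// Gamma has poles at zero and the negative integers, where tgamma reports a
// pole/domain error.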
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tgamma (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__tgamma_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = tgamma (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = tgamma (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__tgamma_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_thread_create_test_1.c | // execute in parallel
// input the number of thread
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <omp.h>
#include <sys/timeb.h>
//#include <omp_interop.h>
#include <unistd.h>
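/* Note (added): omp_thread_create / omp_thread_join / omp_thread_exit and the
omp_thread_t type used below are not part of the OpenMP specification; they
appear to come from a research runtime extension (cf. the commented-out
omp_interop.h include above). */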
/* read timer in second */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
void *test_fun(void *arg){
printf("omp_thread: %d\n", *((int*)arg));
return arg;
}
void *test_func_2(void *arg) {
printf("omp_thread: %d\n", *((int*)arg));
omp_thread_exit(arg); /* assumed to terminate the thread, like pthread_exit */
return arg; /* not reached; keeps compilers that require a return value happy */
}
int num_threads = 100;
int iter = 10000;
int main(int argc, char * argv[])
{
int tid;
if (argc >= 2){
omp_set_num_threads(atoi(argv[1]));
num_threads = atoi(argv[1]);
}
// omp_set_nested();
// warm up the runtime's thread pool with a small parallel region (num_threads(2) here)
#pragma omp parallel private(tid) num_threads(2)
{
tid = omp_get_thread_num(); /* use the private(tid) copy instead of shadowing it */
(void)tid; /* silence unused-variable warnings */
}
int retval;
int* ret_value = &retval;
omp_thread_t ompthread_0;
int arg;
arg=0;
omp_thread_create(&ompthread_0, test_fun, (void*)(&arg), NULL);
omp_thread_join(&ompthread_0, (void**)(&ret_value));
printf("omp_thread 0 return: %d\n", *ret_value);
// while(1);
omp_thread_t ompthread_1;
void * stack = malloc(4098);
arg=1;
omp_thread_create(&ompthread_1, test_fun, (void*)(&arg), stack);
omp_thread_join(&ompthread_1, (void**)(&ret_value));
printf("omp_thread 1 return: %d\n", *ret_value);
omp_thread_t ompthread_2;
arg=2;
omp_thread_create(&ompthread_2, test_fun, (void*)(&arg), stack);
omp_thread_join(&ompthread_2, (void**)(&ret_value));
printf("omp_thread 2 return: %d\n", *ret_value);
return 0;
}
|